mirror of
https://github.com/crewAIInc/crewAI.git
synced 2025-12-17 04:48:30 +00:00
Compare commits
2 Commits
bugfix/utc
...
bugfix/sup
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0af6b05e16 | ||
|
|
f5d01b9efc |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -21,5 +21,4 @@ crew_tasks_output.json
|
|||||||
.mypy_cache
|
.mypy_cache
|
||||||
.ruff_cache
|
.ruff_cache
|
||||||
.venv
|
.venv
|
||||||
agentops.log
|
agentops.log
|
||||||
test_flow.html
|
|
||||||
18
README.md
18
README.md
@@ -1,18 +1,10 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
# **CrewAI**
|
# **CrewAI**
|
||||||
|
|
||||||
**CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
|
🤖 **CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
|
||||||
|
|
||||||
**CrewAI Enterprise**
|
|
||||||
Want to plan, build (+ no code), deploy, monitor and interare your agents: [CrewAI Enterprise](https://www.crewai.com/enterprise). Designed for complex, real-world applications, our enterprise solution offers:
|
|
||||||
|
|
||||||
- **Seamless Integrations**
|
|
||||||
- **Scalable & Secure Deployment**
|
|
||||||
- **Actionable Insights**
|
|
||||||
- **24/7 Support**
|
|
||||||
|
|
||||||
<h3>
|
<h3>
|
||||||
|
|
||||||
@@ -198,7 +190,7 @@ research_task:
|
|||||||
description: >
|
description: >
|
||||||
Conduct a thorough research about {topic}
|
Conduct a thorough research about {topic}
|
||||||
Make sure you find any interesting and relevant information given
|
Make sure you find any interesting and relevant information given
|
||||||
the current year is 2025.
|
the current year is 2024.
|
||||||
expected_output: >
|
expected_output: >
|
||||||
A list with 10 bullet points of the most relevant information about {topic}
|
A list with 10 bullet points of the most relevant information about {topic}
|
||||||
agent: researcher
|
agent: researcher
|
||||||
@@ -400,7 +392,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
|
|||||||
goal="Gather and validate supporting market data",
|
goal="Gather and validate supporting market data",
|
||||||
backstory="You excel at finding and correlating multiple data sources"
|
backstory="You excel at finding and correlating multiple data sources"
|
||||||
)
|
)
|
||||||
|
|
||||||
analysis_task = Task(
|
analysis_task = Task(
|
||||||
description="Analyze {sector} sector data for the past {timeframe}",
|
description="Analyze {sector} sector data for the past {timeframe}",
|
||||||
expected_output="Detailed market analysis with confidence score",
|
expected_output="Detailed market analysis with confidence score",
|
||||||
@@ -411,7 +403,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
|
|||||||
expected_output="Corroborating evidence and potential contradictions",
|
expected_output="Corroborating evidence and potential contradictions",
|
||||||
agent=researcher
|
agent=researcher
|
||||||
)
|
)
|
||||||
|
|
||||||
# Demonstrate crew autonomy
|
# Demonstrate crew autonomy
|
||||||
analysis_crew = Crew(
|
analysis_crew = Crew(
|
||||||
agents=[analyst, researcher],
|
agents=[analyst, researcher],
|
||||||
|
|||||||
@@ -43,7 +43,7 @@ Think of an agent as a specialized team member with specific skills, expertise,
|
|||||||
| **Max Retry Limit** _(optional)_ | `max_retry_limit` | `int` | Maximum number of retries when an error occurs. Default is 2. |
|
| **Max Retry Limit** _(optional)_ | `max_retry_limit` | `int` | Maximum number of retries when an error occurs. Default is 2. |
|
||||||
| **Respect Context Window** _(optional)_ | `respect_context_window` | `bool` | Keep messages under context window size by summarizing. Default is True. |
|
| **Respect Context Window** _(optional)_ | `respect_context_window` | `bool` | Keep messages under context window size by summarizing. Default is True. |
|
||||||
| **Code Execution Mode** _(optional)_ | `code_execution_mode` | `Literal["safe", "unsafe"]` | Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct). Default is 'safe'. |
|
| **Code Execution Mode** _(optional)_ | `code_execution_mode` | `Literal["safe", "unsafe"]` | Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct). Default is 'safe'. |
|
||||||
| **Embedder** _(optional)_ | `embedder` | `Optional[Dict[str, Any]]` | Configuration for the embedder used by the agent. |
|
| **Embedder Config** _(optional)_ | `embedder_config` | `Optional[Dict[str, Any]]` | Configuration for the embedder used by the agent. |
|
||||||
| **Knowledge Sources** _(optional)_ | `knowledge_sources` | `Optional[List[BaseKnowledgeSource]]` | Knowledge sources available to the agent. |
|
| **Knowledge Sources** _(optional)_ | `knowledge_sources` | `Optional[List[BaseKnowledgeSource]]` | Knowledge sources available to the agent. |
|
||||||
| **Use System Prompt** _(optional)_ | `use_system_prompt` | `Optional[bool]` | Whether to use system prompt (for o1 model support). Default is True. |
|
| **Use System Prompt** _(optional)_ | `use_system_prompt` | `Optional[bool]` | Whether to use system prompt (for o1 model support). Default is True. |
|
||||||
|
|
||||||
@@ -152,7 +152,7 @@ agent = Agent(
|
|||||||
use_system_prompt=True, # Default: True
|
use_system_prompt=True, # Default: True
|
||||||
tools=[SerperDevTool()], # Optional: List of tools
|
tools=[SerperDevTool()], # Optional: List of tools
|
||||||
knowledge_sources=None, # Optional: List of knowledge sources
|
knowledge_sources=None, # Optional: List of knowledge sources
|
||||||
embedder=None, # Optional: Custom embedder configuration
|
embedder_config=None, # Optional: Custom embedder configuration
|
||||||
system_template=None, # Optional: Custom system prompt template
|
system_template=None, # Optional: Custom system prompt template
|
||||||
prompt_template=None, # Optional: Custom prompt template
|
prompt_template=None, # Optional: Custom prompt template
|
||||||
response_template=None, # Optional: Custom response template
|
response_template=None, # Optional: Custom response template
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ The CrewAI CLI provides a set of commands to interact with CrewAI, allowing you
|
|||||||
|
|
||||||
To use the CrewAI CLI, make sure you have CrewAI installed:
|
To use the CrewAI CLI, make sure you have CrewAI installed:
|
||||||
|
|
||||||
```shell Terminal
|
```shell
|
||||||
pip install crewai
|
pip install crewai
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -20,7 +20,7 @@ pip install crewai
|
|||||||
|
|
||||||
The basic structure of a CrewAI CLI command is:
|
The basic structure of a CrewAI CLI command is:
|
||||||
|
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai [COMMAND] [OPTIONS] [ARGUMENTS]
|
crewai [COMMAND] [OPTIONS] [ARGUMENTS]
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -30,7 +30,7 @@ crewai [COMMAND] [OPTIONS] [ARGUMENTS]
|
|||||||
|
|
||||||
Create a new crew or flow.
|
Create a new crew or flow.
|
||||||
|
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai create [OPTIONS] TYPE NAME
|
crewai create [OPTIONS] TYPE NAME
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -38,7 +38,7 @@ crewai create [OPTIONS] TYPE NAME
|
|||||||
- `NAME`: Name of the crew or flow
|
- `NAME`: Name of the crew or flow
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai create crew my_new_crew
|
crewai create crew my_new_crew
|
||||||
crewai create flow my_new_flow
|
crewai create flow my_new_flow
|
||||||
```
|
```
|
||||||
@@ -47,14 +47,14 @@ crewai create flow my_new_flow
|
|||||||
|
|
||||||
Show the installed version of CrewAI.
|
Show the installed version of CrewAI.
|
||||||
|
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai version [OPTIONS]
|
crewai version [OPTIONS]
|
||||||
```
|
```
|
||||||
|
|
||||||
- `--tools`: (Optional) Show the installed version of CrewAI tools
|
- `--tools`: (Optional) Show the installed version of CrewAI tools
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai version
|
crewai version
|
||||||
crewai version --tools
|
crewai version --tools
|
||||||
```
|
```
|
||||||
@@ -63,7 +63,7 @@ crewai version --tools
|
|||||||
|
|
||||||
Train the crew for a specified number of iterations.
|
Train the crew for a specified number of iterations.
|
||||||
|
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai train [OPTIONS]
|
crewai train [OPTIONS]
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -71,7 +71,7 @@ crewai train [OPTIONS]
|
|||||||
- `-f, --filename TEXT`: Path to a custom file for training (default: "trained_agents_data.pkl")
|
- `-f, --filename TEXT`: Path to a custom file for training (default: "trained_agents_data.pkl")
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai train -n 10 -f my_training_data.pkl
|
crewai train -n 10 -f my_training_data.pkl
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -79,14 +79,14 @@ crewai train -n 10 -f my_training_data.pkl
|
|||||||
|
|
||||||
Replay the crew execution from a specific task.
|
Replay the crew execution from a specific task.
|
||||||
|
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai replay [OPTIONS]
|
crewai replay [OPTIONS]
|
||||||
```
|
```
|
||||||
|
|
||||||
- `-t, --task_id TEXT`: Replay the crew from this task ID, including all subsequent tasks
|
- `-t, --task_id TEXT`: Replay the crew from this task ID, including all subsequent tasks
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai replay -t task_123456
|
crewai replay -t task_123456
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -94,7 +94,7 @@ crewai replay -t task_123456
|
|||||||
|
|
||||||
Retrieve your latest crew.kickoff() task outputs.
|
Retrieve your latest crew.kickoff() task outputs.
|
||||||
|
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai log-tasks-outputs
|
crewai log-tasks-outputs
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -102,7 +102,7 @@ crewai log-tasks-outputs
|
|||||||
|
|
||||||
Reset the crew memories (long, short, entity, latest_crew_kickoff_outputs).
|
Reset the crew memories (long, short, entity, latest_crew_kickoff_outputs).
|
||||||
|
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai reset-memories [OPTIONS]
|
crewai reset-memories [OPTIONS]
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -113,7 +113,7 @@ crewai reset-memories [OPTIONS]
|
|||||||
- `-a, --all`: Reset ALL memories
|
- `-a, --all`: Reset ALL memories
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai reset-memories --long --short
|
crewai reset-memories --long --short
|
||||||
crewai reset-memories --all
|
crewai reset-memories --all
|
||||||
```
|
```
|
||||||
@@ -122,7 +122,7 @@ crewai reset-memories --all
|
|||||||
|
|
||||||
Test the crew and evaluate the results.
|
Test the crew and evaluate the results.
|
||||||
|
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai test [OPTIONS]
|
crewai test [OPTIONS]
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -130,7 +130,7 @@ crewai test [OPTIONS]
|
|||||||
- `-m, --model TEXT`: LLM Model to run the tests on the Crew (default: "gpt-4o-mini")
|
- `-m, --model TEXT`: LLM Model to run the tests on the Crew (default: "gpt-4o-mini")
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai test -n 5 -m gpt-3.5-turbo
|
crewai test -n 5 -m gpt-3.5-turbo
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -138,7 +138,7 @@ crewai test -n 5 -m gpt-3.5-turbo
|
|||||||
|
|
||||||
Run the crew.
|
Run the crew.
|
||||||
|
|
||||||
```shell Terminal
|
```shell
|
||||||
crewai run
|
crewai run
|
||||||
```
|
```
|
||||||
<Note>
|
<Note>
|
||||||
@@ -147,36 +147,7 @@ Some commands may require additional configuration or setup within your project
|
|||||||
</Note>
|
</Note>
|
||||||
|
|
||||||
|
|
||||||
### 9. Chat
|
### 9. API Keys
|
||||||
|
|
||||||
Starting in version `0.98.0`, when you run the `crewai chat` command, you start an interactive session with your crew. The AI assistant will guide you by asking for necessary inputs to execute the crew. Once all inputs are provided, the crew will execute its tasks.
|
|
||||||
|
|
||||||
After receiving the results, you can continue interacting with the assistant for further instructions or questions.
|
|
||||||
|
|
||||||
```shell Terminal
|
|
||||||
crewai chat
|
|
||||||
```
|
|
||||||
<Note>
|
|
||||||
Ensure you execute these commands from your CrewAI project's root directory.
|
|
||||||
</Note>
|
|
||||||
<Note>
|
|
||||||
IMPORTANT: Set the `chat_llm` property in your `crew.py` file to enable this command.
|
|
||||||
|
|
||||||
```python
|
|
||||||
@crew
|
|
||||||
def crew(self) -> Crew:
|
|
||||||
return Crew(
|
|
||||||
agents=self.agents,
|
|
||||||
tasks=self.tasks,
|
|
||||||
process=Process.sequential,
|
|
||||||
verbose=True,
|
|
||||||
chat_llm="gpt-4o", # LLM for chat orchestration
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Note>
|
|
||||||
|
|
||||||
|
|
||||||
### 10. API Keys
|
|
||||||
|
|
||||||
When running ```crewai create crew``` command, the CLI will first show you the top 5 most common LLM providers and ask you to select one.
|
When running ```crewai create crew``` command, the CLI will first show you the top 5 most common LLM providers and ask you to select one.
|
||||||
|
|
||||||
|
|||||||
@@ -23,14 +23,14 @@ A crew in crewAI represents a collaborative group of agents working together to
|
|||||||
| **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
|
| **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
|
||||||
| **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
|
| **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
|
||||||
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
|
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
|
||||||
| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
|
| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
|
||||||
| **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
|
| **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
|
||||||
| **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
|
| **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
|
||||||
| **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
|
| **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
|
||||||
| **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
|
| **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
|
||||||
| **Task Callback** _(optional)_ | `task_callback` | A function that is called after the completion of each task. Useful for monitoring or additional operations post-task execution. |
|
| **Task Callback** _(optional)_ | `task_callback` | A function that is called after the completion of each task. Useful for monitoring or additional operations post-task execution. |
|
||||||
| **Share Crew** _(optional)_ | `share_crew` | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
|
| **Share Crew** _(optional)_ | `share_crew` | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
|
||||||
| **Output Log File** _(optional)_ | `output_log_file` | Set to True to save logs as logs.txt in the current directory or provide a file path. Logs will be in JSON format if the filename ends in .json, otherwise .txt. Defautls to `None`. |
|
| **Output Log File** _(optional)_ | `output_log_file` | Whether you want to have a file with the complete crew output and execution. You can set it using True and it will default to the folder you are currently in and it will be called logs.txt or passing a string with the full path and name of the file. |
|
||||||
| **Manager Agent** _(optional)_ | `manager_agent` | `manager` sets a custom agent that will be used as a manager. |
|
| **Manager Agent** _(optional)_ | `manager_agent` | `manager` sets a custom agent that will be used as a manager. |
|
||||||
| **Prompt File** _(optional)_ | `prompt_file` | Path to the prompt JSON file to be used for the crew. |
|
| **Prompt File** _(optional)_ | `prompt_file` | Path to the prompt JSON file to be used for the crew. |
|
||||||
| **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated before each Crew iteration, all Crew data is sent to an AgentPlanner that will plan the tasks and this plan will be added to each task description. |
|
| **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated before each Crew iteration, all Crew data is sent to an AgentPlanner that will plan the tasks and this plan will be added to each task description. |
|
||||||
@@ -240,23 +240,6 @@ print(f"Tasks Output: {crew_output.tasks_output}")
|
|||||||
print(f"Token Usage: {crew_output.token_usage}")
|
print(f"Token Usage: {crew_output.token_usage}")
|
||||||
```
|
```
|
||||||
|
|
||||||
## Accessing Crew Logs
|
|
||||||
|
|
||||||
You can see real time log of the crew execution, by setting `output_log_file` as a `True(Boolean)` or a `file_name(str)`. Supports logging of events as both `file_name.txt` and `file_name.json`.
|
|
||||||
In case of `True(Boolean)` will save as `logs.txt`.
|
|
||||||
|
|
||||||
In case of `output_log_file` is set as `False(Booelan)` or `None`, the logs will not be populated.
|
|
||||||
|
|
||||||
```python Code
|
|
||||||
# Save crew logs
|
|
||||||
crew = Crew(output_log_file = True) # Logs will be saved as logs.txt
|
|
||||||
crew = Crew(output_log_file = file_name) # Logs will be saved as file_name.txt
|
|
||||||
crew = Crew(output_log_file = file_name.txt) # Logs will be saved as file_name.txt
|
|
||||||
crew = Crew(output_log_file = file_name.json) # Logs will be saved as file_name.json
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Memory Utilization
|
## Memory Utilization
|
||||||
|
|
||||||
Crews can utilize memory (short-term, long-term, and entity memory) to enhance their execution and learning over time. This feature allows crews to store and recall execution memories, aiding in decision-making and task execution strategies.
|
Crews can utilize memory (short-term, long-term, and entity memory) to enhance their execution and learning over time. This feature allows crews to store and recall execution memories, aiding in decision-making and task execution strategies.
|
||||||
@@ -296,9 +279,9 @@ print(result)
|
|||||||
Once your crew is assembled, initiate the workflow with the appropriate kickoff method. CrewAI provides several methods for better control over the kickoff process: `kickoff()`, `kickoff_for_each()`, `kickoff_async()`, and `kickoff_for_each_async()`.
|
Once your crew is assembled, initiate the workflow with the appropriate kickoff method. CrewAI provides several methods for better control over the kickoff process: `kickoff()`, `kickoff_for_each()`, `kickoff_async()`, and `kickoff_for_each_async()`.
|
||||||
|
|
||||||
- `kickoff()`: Starts the execution process according to the defined process flow.
|
- `kickoff()`: Starts the execution process according to the defined process flow.
|
||||||
- `kickoff_for_each()`: Executes tasks sequentially for each provided input event or item in the collection.
|
- `kickoff_for_each()`: Executes tasks for each agent individually.
|
||||||
- `kickoff_async()`: Initiates the workflow asynchronously.
|
- `kickoff_async()`: Initiates the workflow asynchronously.
|
||||||
- `kickoff_for_each_async()`: Executes tasks concurrently for each provided input event or item, leveraging asynchronous processing.
|
- `kickoff_for_each_async()`: Executes tasks for each agent individually in an asynchronous manner.
|
||||||
|
|
||||||
```python Code
|
```python Code
|
||||||
# Start the crew's task execution
|
# Start the crew's task execution
|
||||||
|
|||||||
@@ -232,18 +232,18 @@ class UnstructuredExampleFlow(Flow):
|
|||||||
def first_method(self):
|
def first_method(self):
|
||||||
# The state automatically includes an 'id' field
|
# The state automatically includes an 'id' field
|
||||||
print(f"State ID: {self.state['id']}")
|
print(f"State ID: {self.state['id']}")
|
||||||
self.state['counter'] = 0
|
self.state.message = "Hello from structured flow"
|
||||||
self.state['message'] = "Hello from structured flow"
|
self.state.counter = 0
|
||||||
|
|
||||||
@listen(first_method)
|
@listen(first_method)
|
||||||
def second_method(self):
|
def second_method(self):
|
||||||
self.state['counter'] += 1
|
self.state.counter += 1
|
||||||
self.state['message'] += " - updated"
|
self.state.message += " - updated"
|
||||||
|
|
||||||
@listen(second_method)
|
@listen(second_method)
|
||||||
def third_method(self):
|
def third_method(self):
|
||||||
self.state['counter'] += 1
|
self.state.counter += 1
|
||||||
self.state['message'] += " - updated again"
|
self.state.message += " - updated again"
|
||||||
|
|
||||||
print(f"State after third_method: {self.state}")
|
print(f"State after third_method: {self.state}")
|
||||||
|
|
||||||
@@ -323,91 +323,6 @@ flow.kickoff()
|
|||||||
|
|
||||||
By providing both unstructured and structured state management options, CrewAI Flows empowers developers to build AI workflows that are both flexible and robust, catering to a wide range of application requirements.
|
By providing both unstructured and structured state management options, CrewAI Flows empowers developers to build AI workflows that are both flexible and robust, catering to a wide range of application requirements.
|
||||||
|
|
||||||
## Flow Persistence
|
|
||||||
|
|
||||||
The @persist decorator enables automatic state persistence in CrewAI Flows, allowing you to maintain flow state across restarts or different workflow executions. This decorator can be applied at either the class level or method level, providing flexibility in how you manage state persistence.
|
|
||||||
|
|
||||||
### Class-Level Persistence
|
|
||||||
|
|
||||||
When applied at the class level, the @persist decorator automatically persists all flow method states:
|
|
||||||
|
|
||||||
```python
|
|
||||||
@persist # Using SQLiteFlowPersistence by default
|
|
||||||
class MyFlow(Flow[MyState]):
|
|
||||||
@start()
|
|
||||||
def initialize_flow(self):
|
|
||||||
# This method will automatically have its state persisted
|
|
||||||
self.state.counter = 1
|
|
||||||
print("Initialized flow. State ID:", self.state.id)
|
|
||||||
|
|
||||||
@listen(initialize_flow)
|
|
||||||
def next_step(self):
|
|
||||||
# The state (including self.state.id) is automatically reloaded
|
|
||||||
self.state.counter += 1
|
|
||||||
print("Flow state is persisted. Counter:", self.state.counter)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Method-Level Persistence
|
|
||||||
|
|
||||||
For more granular control, you can apply @persist to specific methods:
|
|
||||||
|
|
||||||
```python
|
|
||||||
class AnotherFlow(Flow[dict]):
|
|
||||||
@persist # Persists only this method's state
|
|
||||||
@start()
|
|
||||||
def begin(self):
|
|
||||||
if "runs" not in self.state:
|
|
||||||
self.state["runs"] = 0
|
|
||||||
self.state["runs"] += 1
|
|
||||||
print("Method-level persisted runs:", self.state["runs"])
|
|
||||||
```
|
|
||||||
|
|
||||||
### How It Works
|
|
||||||
|
|
||||||
1. **Unique State Identification**
|
|
||||||
- Each flow state automatically receives a unique UUID
|
|
||||||
- The ID is preserved across state updates and method calls
|
|
||||||
- Supports both structured (Pydantic BaseModel) and unstructured (dictionary) states
|
|
||||||
|
|
||||||
2. **Default SQLite Backend**
|
|
||||||
- SQLiteFlowPersistence is the default storage backend
|
|
||||||
- States are automatically saved to a local SQLite database
|
|
||||||
- Robust error handling ensures clear messages if database operations fail
|
|
||||||
|
|
||||||
3. **Error Handling**
|
|
||||||
- Comprehensive error messages for database operations
|
|
||||||
- Automatic state validation during save and load
|
|
||||||
- Clear feedback when persistence operations encounter issues
|
|
||||||
|
|
||||||
### Important Considerations
|
|
||||||
|
|
||||||
- **State Types**: Both structured (Pydantic BaseModel) and unstructured (dictionary) states are supported
|
|
||||||
- **Automatic ID**: The `id` field is automatically added if not present
|
|
||||||
- **State Recovery**: Failed or restarted flows can automatically reload their previous state
|
|
||||||
- **Custom Implementation**: You can provide your own FlowPersistence implementation for specialized storage needs
|
|
||||||
|
|
||||||
### Technical Advantages
|
|
||||||
|
|
||||||
1. **Precise Control Through Low-Level Access**
|
|
||||||
- Direct access to persistence operations for advanced use cases
|
|
||||||
- Fine-grained control via method-level persistence decorators
|
|
||||||
- Built-in state inspection and debugging capabilities
|
|
||||||
- Full visibility into state changes and persistence operations
|
|
||||||
|
|
||||||
2. **Enhanced Reliability**
|
|
||||||
- Automatic state recovery after system failures or restarts
|
|
||||||
- Transaction-based state updates for data integrity
|
|
||||||
- Comprehensive error handling with clear error messages
|
|
||||||
- Robust validation during state save and load operations
|
|
||||||
|
|
||||||
3. **Extensible Architecture**
|
|
||||||
- Customizable persistence backend through FlowPersistence interface
|
|
||||||
- Support for specialized storage solutions beyond SQLite
|
|
||||||
- Compatible with both structured (Pydantic) and unstructured (dict) states
|
|
||||||
- Seamless integration with existing CrewAI flow patterns
|
|
||||||
|
|
||||||
The persistence system's architecture emphasizes technical precision and customization options, allowing developers to maintain full control over state management while benefiting from built-in reliability features.
|
|
||||||
|
|
||||||
## Flow Control
|
## Flow Control
|
||||||
|
|
||||||
### Conditional Logic: `or`
|
### Conditional Logic: `or`
|
||||||
|
|||||||
@@ -91,13 +91,7 @@ result = crew.kickoff(inputs={"question": "What city does John live in and how o
|
|||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including MD, PDF, DOCX, HTML, and more.
|
Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including TXT, PDF, DOCX, HTML, and more.
|
||||||
|
|
||||||
<Note>
|
|
||||||
You need to install `docling` for the following example to work: `uv add docling`
|
|
||||||
</Note>
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
```python Code
|
```python Code
|
||||||
from crewai import LLM, Agent, Crew, Process, Task
|
from crewai import LLM, Agent, Crew, Process, Task
|
||||||
@@ -152,10 +146,10 @@ Here are examples of how to use different types of knowledge sources:
|
|||||||
|
|
||||||
### Text File Knowledge Source
|
### Text File Knowledge Source
|
||||||
```python
|
```python
|
||||||
from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource
|
from crewai.knowledge.source.crew_docling_source import CrewDoclingSource
|
||||||
|
|
||||||
# Create a text file knowledge source
|
# Create a text file knowledge source
|
||||||
text_source = TextFileKnowledgeSource(
|
text_source = CrewDoclingSource(
|
||||||
file_paths=["document.txt", "another.txt"]
|
file_paths=["document.txt", "another.txt"]
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -288,7 +282,6 @@ The `embedder` parameter supports various embedding model providers that include
|
|||||||
- `ollama`: Local embeddings with Ollama
|
- `ollama`: Local embeddings with Ollama
|
||||||
- `vertexai`: Google Cloud VertexAI embeddings
|
- `vertexai`: Google Cloud VertexAI embeddings
|
||||||
- `cohere`: Cohere's embedding models
|
- `cohere`: Cohere's embedding models
|
||||||
- `voyageai`: VoyageAI's embedding models
|
|
||||||
- `bedrock`: AWS Bedrock embeddings
|
- `bedrock`: AWS Bedrock embeddings
|
||||||
- `huggingface`: Hugging Face models
|
- `huggingface`: Hugging Face models
|
||||||
- `watson`: IBM Watson embeddings
|
- `watson`: IBM Watson embeddings
|
||||||
@@ -324,13 +317,6 @@ agent = Agent(
|
|||||||
verbose=True,
|
verbose=True,
|
||||||
allow_delegation=False,
|
allow_delegation=False,
|
||||||
llm=gemini_llm,
|
llm=gemini_llm,
|
||||||
embedder={
|
|
||||||
"provider": "google",
|
|
||||||
"config": {
|
|
||||||
"model": "models/text-embedding-004",
|
|
||||||
"api_key": GEMINI_API_KEY,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
task = Task(
|
task = Task(
|
||||||
|
|||||||
@@ -27,6 +27,155 @@ Large Language Models (LLMs) are the core intelligence behind CrewAI agents. The
|
|||||||
</Card>
|
</Card>
|
||||||
</CardGroup>
|
</CardGroup>
|
||||||
|
|
||||||
|
## Available Models and Their Capabilities
|
||||||
|
|
||||||
|
Here's a detailed breakdown of supported models and their capabilities, you can compare performance at [lmarena.ai](https://lmarena.ai/?leaderboard) and [artificialanalysis.ai](https://artificialanalysis.ai/):
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<Tab title="OpenAI">
|
||||||
|
| Model | Context Window | Best For |
|
||||||
|
|-------|---------------|-----------|
|
||||||
|
| GPT-4 | 8,192 tokens | High-accuracy tasks, complex reasoning |
|
||||||
|
| GPT-4 Turbo | 128,000 tokens | Long-form content, document analysis |
|
||||||
|
| GPT-4o & GPT-4o-mini | 128,000 tokens | Cost-effective large context processing |
|
||||||
|
|
||||||
|
<Note>
|
||||||
|
1 token ≈ 4 characters in English. For example, 8,192 tokens ≈ 32,768 characters or about 6,000 words.
|
||||||
|
</Note>
|
||||||
|
</Tab>
|
||||||
|
<Tab title="Nvidia NIM">
|
||||||
|
| Model | Context Window | Best For |
|
||||||
|
|-------|---------------|-----------|
|
||||||
|
| nvidia/mistral-nemo-minitron-8b-8k-instruct | 8,192 tokens | State-of-the-art small language model delivering superior accuracy for chatbot, virtual assistants, and content generation. |
|
||||||
|
| nvidia/nemotron-4-mini-hindi-4b-instruct | 4,096 tokens | A bilingual Hindi-English SLM for on-device inference, tailored specifically for Hindi Language. |
|
||||||
|
| nvidia/llama-3.1-nemotron-70b-instruct | 128k tokens | Llama-3.1-Nemotron-70B-Instruct is a large language model customized by NVIDIA in order to improve the helpfulness of LLM generated responses. |
|
||||||
|
| nvidia/llama3-chatqa-1.5-8b | 128k tokens | Advanced LLM to generate high-quality, context-aware responses for chatbots and search engines. |
|
||||||
|
| nvidia/llama3-chatqa-1.5-70b | 128k tokens | Advanced LLM to generate high-quality, context-aware responses for chatbots and search engines. |
|
||||||
|
| nvidia/vila | 128k tokens | Multi-modal vision-language model that understands text/img/video and creates informative responses |
|
||||||
|
| nvidia/neva-22 | 4,096 tokens | Multi-modal vision-language model that understands text/images and generates informative responses |
|
||||||
|
| nvidia/nemotron-mini-4b-instruct | 8,192 tokens | General-purpose tasks |
|
||||||
|
| nvidia/usdcode-llama3-70b-instruct | 128k tokens | State-of-the-art LLM that answers OpenUSD knowledge queries and generates USD-Python code. |
|
||||||
|
| nvidia/nemotron-4-340b-instruct | 4,096 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
||||||
|
| meta/codellama-70b | 100k tokens | LLM capable of generating code from natural language and vice versa. |
|
||||||
|
| meta/llama2-70b | 4,096 tokens | Cutting-edge large language AI model capable of generating text and code in response to prompts. |
|
||||||
|
| meta/llama3-8b-instruct | 8,192 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
|
||||||
|
| meta/llama3-70b-instruct | 8,192 tokens | Powers complex conversations with superior contextual understanding, reasoning and text generation. |
|
||||||
|
| meta/llama-3.1-8b-instruct | 128k tokens | Advanced state-of-the-art model with language understanding, superior reasoning, and text generation. |
|
||||||
|
| meta/llama-3.1-70b-instruct | 128k tokens | Powers complex conversations with superior contextual understanding, reasoning and text generation. |
|
||||||
|
| meta/llama-3.1-405b-instruct | 128k tokens | Advanced LLM for synthetic data generation, distillation, and inference for chatbots, coding, and domain-specific tasks. |
|
||||||
|
| meta/llama-3.2-1b-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
||||||
|
| meta/llama-3.2-3b-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
||||||
|
| meta/llama-3.2-11b-vision-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
||||||
|
| meta/llama-3.2-90b-vision-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
||||||
|
| meta/llama-3.1-70b-instruct | 128k tokens | Powers complex conversations with superior contextual understanding, reasoning and text generation. |
|
||||||
|
| google/gemma-7b | 8,192 tokens | Cutting-edge text generation model for text understanding, transformation, and code generation. |
|
||||||
|
| google/gemma-2b | 8,192 tokens | Cutting-edge text generation model for text understanding, transformation, and code generation. |
|
||||||
|
| google/codegemma-7b | 8,192 tokens | Cutting-edge model built on Google's Gemma-7B specialized for code generation and code completion. |
|
||||||
|
| google/codegemma-1.1-7b | 8,192 tokens | Advanced programming model for code generation, completion, reasoning, and instruction following. |
|
||||||
|
| google/recurrentgemma-2b | 8,192 tokens | Novel recurrent architecture based language model for faster inference when generating long sequences. |
|
||||||
|
| google/gemma-2-9b-it | 8,192 tokens | Cutting-edge text generation model for text understanding, transformation, and code generation. |
|
||||||
|
| google/gemma-2-27b-it | 8,192 tokens | Cutting-edge text generation model for text understanding, transformation, and code generation. |
|
||||||
|
| google/gemma-2-2b-it | 8,192 tokens | Cutting-edge text generation model for text understanding, transformation, and code generation. |
|
||||||
|
| google/deplot | 512 tokens | One-shot visual language understanding model that translates images of plots into tables. |
|
||||||
|
| google/paligemma | 8,192 tokens | Vision language model adept at comprehending text and visual inputs to produce informative responses. |
|
||||||
|
| mistralai/mistral-7b-instruct-v0.2 | 32k tokens | This LLM follows instructions, completes requests, and generates creative text. |
|
||||||
|
| mistralai/mixtral-8x7b-instruct-v0.1 | 8,192 tokens | An MOE LLM that follows instructions, completes requests, and generates creative text. |
|
||||||
|
| mistralai/mistral-large | 4,096 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
||||||
|
| mistralai/mixtral-8x22b-instruct-v0.1 | 8,192 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
||||||
|
| mistralai/mistral-7b-instruct-v0.3 | 32k tokens | This LLM follows instructions, completes requests, and generates creative text. |
|
||||||
|
| nv-mistralai/mistral-nemo-12b-instruct | 128k tokens | Most advanced language model for reasoning, code, multilingual tasks; runs on a single GPU. |
|
||||||
|
| mistralai/mamba-codestral-7b-v0.1 | 256k tokens | Model for writing and interacting with code across a wide range of programming languages and tasks. |
|
||||||
|
| microsoft/phi-3-mini-128k-instruct | 128K tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||||
|
| microsoft/phi-3-mini-4k-instruct | 4,096 tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||||
|
| microsoft/phi-3-small-8k-instruct | 8,192 tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||||
|
| microsoft/phi-3-small-128k-instruct | 128K tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||||
|
| microsoft/phi-3-medium-4k-instruct | 4,096 tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||||
|
| microsoft/phi-3-medium-128k-instruct | 128K tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||||
|
| microsoft/phi-3.5-mini-instruct | 128K tokens | Lightweight multilingual LLM powering AI applications in latency bound, memory/compute constrained environments |
|
||||||
|
| microsoft/phi-3.5-moe-instruct | 128K tokens | Advanced LLM based on Mixture of Experts architecture to deliver compute efficient content generation |
|
||||||
|
| microsoft/kosmos-2 | 1,024 tokens | Groundbreaking multimodal model designed to understand and reason about visual elements in images. |
|
||||||
|
| microsoft/phi-3-vision-128k-instruct | 128k tokens | Cutting-edge open multimodal model excelling in high-quality reasoning from images. |
|
||||||
|
| microsoft/phi-3.5-vision-instruct | 128k tokens | Cutting-edge open multimodal model excelling in high-quality reasoning from images. |
|
||||||
|
| databricks/dbrx-instruct | 12k tokens | A general-purpose LLM with state-of-the-art performance in language understanding, coding, and RAG. |
|
||||||
|
| snowflake/arctic | 1,024 tokens | Delivers high efficiency inference for enterprise applications focused on SQL generation and coding. |
|
||||||
|
| aisingapore/sea-lion-7b-instruct | 4,096 tokens | LLM to represent and serve the linguistic and cultural diversity of Southeast Asia |
|
||||||
|
| ibm/granite-8b-code-instruct | 4,096 tokens | Software programming LLM for code generation, completion, explanation, and multi-turn conversion. |
|
||||||
|
| ibm/granite-34b-code-instruct | 8,192 tokens | Software programming LLM for code generation, completion, explanation, and multi-turn conversion. |
|
||||||
|
| ibm/granite-3.0-8b-instruct | 4,096 tokens | Advanced Small Language Model supporting RAG, summarization, classification, code, and agentic AI |
|
||||||
|
| ibm/granite-3.0-3b-a800m-instruct | 4,096 tokens | Highly efficient Mixture of Experts model for RAG, summarization, entity extraction, and classification |
|
||||||
|
| mediatek/breeze-7b-instruct | 4,096 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
||||||
|
| upstage/solar-10.7b-instruct | 4,096 tokens | Excels in NLP tasks, particularly in instruction-following, reasoning, and mathematics. |
|
||||||
|
| writer/palmyra-med-70b-32k | 32k tokens | Leading LLM for accurate, contextually relevant responses in the medical domain. |
|
||||||
|
| writer/palmyra-med-70b | 32k tokens | Leading LLM for accurate, contextually relevant responses in the medical domain. |
|
||||||
|
| writer/palmyra-fin-70b-32k | 32k tokens | Specialized LLM for financial analysis, reporting, and data processing |
|
||||||
|
| 01-ai/yi-large | 32k tokens | Powerful model trained on English and Chinese for diverse tasks including chatbot and creative writing. |
|
||||||
|
| deepseek-ai/deepseek-coder-6.7b-instruct | 2k tokens | Powerful coding model offering advanced capabilities in code generation, completion, and infilling |
|
||||||
|
| rakuten/rakutenai-7b-instruct | 1,024 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
|
||||||
|
| rakuten/rakutenai-7b-chat | 1,024 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
|
||||||
|
| baichuan-inc/baichuan2-13b-chat | 4,096 tokens | Support Chinese and English chat, coding, math, instruction following, solving quizzes |
|
||||||
|
|
||||||
|
<Note>
|
||||||
|
NVIDIA's NIM support for models is expanding continuously! For the most up-to-date list of available models, please visit build.nvidia.com.
|
||||||
|
</Note>
|
||||||
|
</Tab>
|
||||||
|
<Tab title="Gemini">
|
||||||
|
| Model | Context Window | Best For |
|
||||||
|
|-------|---------------|-----------|
|
||||||
|
| gemini-2.0-flash-exp | 1M tokens | Higher quality at faster speed, multimodal model, good for most tasks |
|
||||||
|
| gemini-1.5-flash | 1M tokens | Balanced multimodal model, good for most tasks |
|
||||||
|
| gemini-1.5-flash-8B | 1M tokens | Fastest, most cost-efficient, good for high-frequency tasks |
|
||||||
|
| gemini-1.5-pro | 2M tokens | Best performing, wide variety of reasoning tasks including logical reasoning, coding, and creative collaboration |
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
Google's Gemini models are all multimodal, supporting audio, images, video and text, supporting context caching, json schema, function calling, etc.
|
||||||
|
|
||||||
|
These models are available via API_KEY from
|
||||||
|
[The Gemini API](https://ai.google.dev/gemini-api/docs) and also from
|
||||||
|
[Google Cloud Vertex](https://cloud.google.com/vertex-ai/generative-ai/docs/migrate/migrate-google-ai) as part of the
|
||||||
|
[Model Garden](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/explore-models).
|
||||||
|
</Tip>
|
||||||
|
</Tab>
|
||||||
|
<Tab title="Groq">
|
||||||
|
| Model | Context Window | Best For |
|
||||||
|
|-------|---------------|-----------|
|
||||||
|
| Llama 3.1 70B/8B | 131,072 tokens | High-performance, large context tasks |
|
||||||
|
| Llama 3.2 Series | 8,192 tokens | General-purpose tasks |
|
||||||
|
| Mixtral 8x7B | 32,768 tokens | Balanced performance and context |
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
Groq is known for its fast inference speeds, making it suitable for real-time applications.
|
||||||
|
</Tip>
|
||||||
|
</Tab>
|
||||||
|
<Tab title="SambaNova">
|
||||||
|
| Model | Context Window | Best For |
|
||||||
|
|-------|---------------|-----------|
|
||||||
|
| Llama 3.1 70B/8B | Up to 131,072 tokens | High-performance, large context tasks |
|
||||||
|
| Llama 3.1 405B | 8,192 tokens | High-performance and output quality |
|
||||||
|
| Llama 3.2 Series | 8,192 tokens | General-purpose tasks, multimodal |
|
||||||
|
| Llama 3.3 70B | Up to 131,072 tokens | High-performance and output quality |
|
||||||
|
| Qwen2 family | 8,192 tokens | High-performance and output quality |
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
[SambaNova](https://cloud.sambanova.ai/) has several models with fast inference speed at full precision.
|
||||||
|
</Tip>
|
||||||
|
</Tab>
|
||||||
|
<Tab title="Others">
|
||||||
|
| Provider | Context Window | Key Features |
|
||||||
|
|----------|---------------|--------------|
|
||||||
|
| Deepseek Chat | 128,000 tokens | Specialized in technical discussions |
|
||||||
|
| Claude 3 | Up to 200K tokens | Strong reasoning, code understanding |
|
||||||
|
| Gemma Series | 8,192 tokens | Efficient, smaller-scale tasks |
|
||||||
|
|
||||||
|
<Info>
|
||||||
|
Provider selection should consider factors like:
|
||||||
|
- API availability in your region
|
||||||
|
- Pricing structure
|
||||||
|
- Required features (e.g., streaming, function calling)
|
||||||
|
- Performance requirements
|
||||||
|
</Info>
|
||||||
|
</Tab>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
## Setting Up Your LLM
|
## Setting Up Your LLM
|
||||||
|
|
||||||
There are three ways to configure LLMs in CrewAI. Choose the method that best fits your workflow:
|
There are three ways to configure LLMs in CrewAI. Choose the method that best fits your workflow:
|
||||||
@@ -55,12 +204,95 @@ There are three ways to configure LLMs in CrewAI. Choose the method that best fi
|
|||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
researcher:
|
researcher:
|
||||||
|
# Agent Definition
|
||||||
role: Research Specialist
|
role: Research Specialist
|
||||||
goal: Conduct comprehensive research and analysis
|
goal: Conduct comprehensive research and analysis
|
||||||
backstory: A dedicated research professional with years of experience
|
backstory: A dedicated research professional with years of experience
|
||||||
verbose: true
|
verbose: true
|
||||||
llm: openai/gpt-4o-mini # your model here
|
|
||||||
# (see provider configuration examples below for more)
|
# Model Selection (uncomment your choice)
|
||||||
|
|
||||||
|
# OpenAI Models - Known for reliability and performance
|
||||||
|
llm: openai/gpt-4o-mini
|
||||||
|
# llm: openai/gpt-4 # More accurate but expensive
|
||||||
|
# llm: openai/gpt-4-turbo # Fast with large context
|
||||||
|
# llm: openai/gpt-4o # Optimized for longer texts
|
||||||
|
# llm: openai/o1-preview # Latest features
|
||||||
|
# llm: openai/o1-mini # Cost-effective
|
||||||
|
|
||||||
|
# Azure Models - For enterprise deployments
|
||||||
|
# llm: azure/gpt-4o-mini
|
||||||
|
# llm: azure/gpt-4
|
||||||
|
# llm: azure/gpt-35-turbo
|
||||||
|
|
||||||
|
# Anthropic Models - Strong reasoning capabilities
|
||||||
|
# llm: anthropic/claude-3-opus-20240229-v1:0
|
||||||
|
# llm: anthropic/claude-3-sonnet-20240229-v1:0
|
||||||
|
# llm: anthropic/claude-3-haiku-20240307-v1:0
|
||||||
|
# llm: anthropic/claude-2.1
|
||||||
|
# llm: anthropic/claude-2.0
|
||||||
|
|
||||||
|
# Google Models - Strong reasoning, large cachable context window, multimodal
|
||||||
|
# llm: gemini/gemini-1.5-pro-latest
|
||||||
|
# llm: gemini/gemini-1.5-flash-latest
|
||||||
|
# llm: gemini/gemini-1.5-flash-8b-latest
|
||||||
|
|
||||||
|
# AWS Bedrock Models - Enterprise-grade
|
||||||
|
# llm: bedrock/anthropic.claude-3-sonnet-20240229-v1:0
|
||||||
|
# llm: bedrock/anthropic.claude-v2:1
|
||||||
|
# llm: bedrock/amazon.titan-text-express-v1
|
||||||
|
# llm: bedrock/meta.llama2-70b-chat-v1
|
||||||
|
|
||||||
|
# Mistral Models - Open source alternative
|
||||||
|
# llm: mistral/mistral-large-latest
|
||||||
|
# llm: mistral/mistral-medium-latest
|
||||||
|
# llm: mistral/mistral-small-latest
|
||||||
|
|
||||||
|
# Groq Models - Fast inference
|
||||||
|
# llm: groq/mixtral-8x7b-32768
|
||||||
|
# llm: groq/llama-3.1-70b-versatile
|
||||||
|
# llm: groq/llama-3.2-90b-text-preview
|
||||||
|
# llm: groq/gemma2-9b-it
|
||||||
|
# llm: groq/gemma-7b-it
|
||||||
|
|
||||||
|
# IBM watsonx.ai Models - Enterprise features
|
||||||
|
# llm: watsonx/ibm/granite-13b-chat-v2
|
||||||
|
# llm: watsonx/meta-llama/llama-3-1-70b-instruct
|
||||||
|
# llm: watsonx/bigcode/starcoder2-15b
|
||||||
|
|
||||||
|
# Ollama Models - Local deployment
|
||||||
|
# llm: ollama/llama3:70b
|
||||||
|
# llm: ollama/codellama
|
||||||
|
# llm: ollama/mistral
|
||||||
|
# llm: ollama/mixtral
|
||||||
|
# llm: ollama/phi
|
||||||
|
|
||||||
|
# Fireworks AI Models - Specialized tasks
|
||||||
|
# llm: fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct
|
||||||
|
# llm: fireworks_ai/accounts/fireworks/models/mixtral-8x7b
|
||||||
|
# llm: fireworks_ai/accounts/fireworks/models/zephyr-7b-beta
|
||||||
|
|
||||||
|
# Perplexity AI Models - Research focused
|
||||||
|
# llm: pplx/llama-3.1-sonar-large-128k-online
|
||||||
|
# llm: pplx/mistral-7b-instruct
|
||||||
|
# llm: pplx/codellama-34b-instruct
|
||||||
|
# llm: pplx/mixtral-8x7b-instruct
|
||||||
|
|
||||||
|
# Hugging Face Models - Community models
|
||||||
|
# llm: huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct
|
||||||
|
# llm: huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1
|
||||||
|
# llm: huggingface/tiiuae/falcon-180B-chat
|
||||||
|
# llm: huggingface/google/gemma-7b-it
|
||||||
|
|
||||||
|
# Nvidia NIM Models - GPU-optimized
|
||||||
|
# llm: nvidia_nim/meta/llama3-70b-instruct
|
||||||
|
# llm: nvidia_nim/mistral/mixtral-8x7b
|
||||||
|
# llm: nvidia_nim/google/gemma-7b
|
||||||
|
|
||||||
|
# SambaNova Models - Enterprise AI
|
||||||
|
# llm: sambanova/Meta-Llama-3.1-8B-Instruct
|
||||||
|
# llm: sambanova/BioMistral-7B
|
||||||
|
# llm: sambanova/Falcon-180B
|
||||||
```
|
```
|
||||||
|
|
||||||
<Info>
|
<Info>
|
||||||
@@ -108,465 +340,6 @@ There are three ways to configure LLMs in CrewAI. Choose the method that best fi
|
|||||||
</Tab>
|
</Tab>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
## Provider Configuration Examples
|
|
||||||
|
|
||||||
|
|
||||||
CrewAI supports a multitude of LLM providers, each offering unique features, authentication methods, and model capabilities.
|
|
||||||
In this section, you'll find detailed examples that help you select, configure, and optimize the LLM that best fits your project's needs.
|
|
||||||
|
|
||||||
<AccordionGroup>
|
|
||||||
<Accordion title="OpenAI">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
|
|
||||||
```toml Code
|
|
||||||
# Required
|
|
||||||
OPENAI_API_KEY=sk-...
|
|
||||||
|
|
||||||
# Optional
|
|
||||||
OPENAI_API_BASE=<custom-base-url>
|
|
||||||
OPENAI_ORGANIZATION=<your-org-id>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
from crewai import LLM
|
|
||||||
|
|
||||||
llm = LLM(
|
|
||||||
model="openai/gpt-4", # call model by provider/model_name
|
|
||||||
temperature=0.8,
|
|
||||||
max_tokens=150,
|
|
||||||
top_p=0.9,
|
|
||||||
frequency_penalty=0.1,
|
|
||||||
presence_penalty=0.1,
|
|
||||||
stop=["END"],
|
|
||||||
seed=42
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
OpenAI is one of the leading providers of LLMs with a wide range of models and features.
|
|
||||||
|
|
||||||
| Model | Context Window | Best For |
|
|
||||||
|---------------------|------------------|-----------------------------------------------|
|
|
||||||
| GPT-4 | 8,192 tokens | High-accuracy tasks, complex reasoning |
|
|
||||||
| GPT-4 Turbo | 128,000 tokens | Long-form content, document analysis |
|
|
||||||
| GPT-4o & GPT-4o-mini | 128,000 tokens | Cost-effective large context processing |
|
|
||||||
| o3-mini | 200,000 tokens | Fast reasoning, complex reasoning |
|
|
||||||
| o1-mini | 128,000 tokens | Fast reasoning, complex reasoning |
|
|
||||||
| o1-preview | 128,000 tokens | Fast reasoning, complex reasoning |
|
|
||||||
| o1 | 200,000 tokens | Fast reasoning, complex reasoning |
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Anthropic">
|
|
||||||
```toml Code
|
|
||||||
ANTHROPIC_API_KEY=sk-ant-...
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="anthropic/claude-3-sonnet-20240229-v1:0",
|
|
||||||
temperature=0.7
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Google">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
|
|
||||||
```toml Code
|
|
||||||
# Option 1: Gemini accessed with an API key.
|
|
||||||
# https://ai.google.dev/gemini-api/docs/api-key
|
|
||||||
GEMINI_API_KEY=<your-api-key>
|
|
||||||
|
|
||||||
# Option 2: Vertex AI IAM credentials for Gemini, Anthropic, and Model Garden.
|
|
||||||
# https://cloud.google.com/vertex-ai/generative-ai/docs/overview
|
|
||||||
```
|
|
||||||
|
|
||||||
Get credentials from your Google Cloud Console and save it to a JSON file with the following code:
|
|
||||||
```python Code
|
|
||||||
import json
|
|
||||||
|
|
||||||
file_path = 'path/to/vertex_ai_service_account.json'
|
|
||||||
|
|
||||||
# Load the JSON file
|
|
||||||
with open(file_path, 'r') as file:
|
|
||||||
vertex_credentials = json.load(file)
|
|
||||||
|
|
||||||
# Convert the credentials to a JSON string
|
|
||||||
vertex_credentials_json = json.dumps(vertex_credentials)
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
from crewai import LLM
|
|
||||||
|
|
||||||
llm = LLM(
|
|
||||||
model="gemini/gemini-1.5-pro-latest",
|
|
||||||
temperature=0.7,
|
|
||||||
vertex_credentials=vertex_credentials_json
|
|
||||||
)
|
|
||||||
```
|
|
||||||
Google offers a range of powerful models optimized for different use cases:
|
|
||||||
|
|
||||||
| Model | Context Window | Best For |
|
|
||||||
|-----------------------|----------------|------------------------------------------------------------------|
|
|
||||||
| gemini-2.0-flash-exp | 1M tokens | Higher quality at faster speed, multimodal model, good for most tasks |
|
|
||||||
| gemini-1.5-flash | 1M tokens | Balanced multimodal model, good for most tasks |
|
|
||||||
| gemini-1.5-flash-8B | 1M tokens | Fastest, most cost-efficient, good for high-frequency tasks |
|
|
||||||
| gemini-1.5-pro | 2M tokens | Best performing, wide variety of reasoning tasks including logical reasoning, coding, and creative collaboration |
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Azure">
|
|
||||||
```toml Code
|
|
||||||
# Required
|
|
||||||
AZURE_API_KEY=<your-api-key>
|
|
||||||
AZURE_API_BASE=<your-resource-url>
|
|
||||||
AZURE_API_VERSION=<api-version>
|
|
||||||
|
|
||||||
# Optional
|
|
||||||
AZURE_AD_TOKEN=<your-azure-ad-token>
|
|
||||||
AZURE_API_TYPE=<your-azure-api-type>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="azure/gpt-4",
|
|
||||||
api_version="2023-05-15"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="AWS Bedrock">
|
|
||||||
```toml Code
|
|
||||||
AWS_ACCESS_KEY_ID=<your-access-key>
|
|
||||||
AWS_SECRET_ACCESS_KEY=<your-secret-key>
|
|
||||||
AWS_DEFAULT_REGION=<your-region>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Amazon SageMaker">
|
|
||||||
```toml Code
|
|
||||||
AWS_ACCESS_KEY_ID=<your-access-key>
|
|
||||||
AWS_SECRET_ACCESS_KEY=<your-secret-key>
|
|
||||||
AWS_DEFAULT_REGION=<your-region>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="sagemaker/<my-endpoint>"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Mistral">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
```toml Code
|
|
||||||
MISTRAL_API_KEY=<your-api-key>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="mistral/mistral-large-latest",
|
|
||||||
temperature=0.7
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Nvidia NIM">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
```toml Code
|
|
||||||
NVIDIA_API_KEY=<your-api-key>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="nvidia_nim/meta/llama3-70b-instruct",
|
|
||||||
temperature=0.7
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
Nvidia NIM provides a comprehensive suite of models for various use cases, from general-purpose tasks to specialized applications.
|
|
||||||
|
|
||||||
| Model | Context Window | Best For |
|
|
||||||
|-------------------------------------------------------------------------|----------------|-------------------------------------------------------------------|
|
|
||||||
| nvidia/mistral-nemo-minitron-8b-8k-instruct | 8,192 tokens | State-of-the-art small language model delivering superior accuracy for chatbot, virtual assistants, and content generation. |
|
|
||||||
| nvidia/nemotron-4-mini-hindi-4b-instruct | 4,096 tokens | A bilingual Hindi-English SLM for on-device inference, tailored specifically for Hindi Language. |
|
|
||||||
| nvidia/llama-3.1-nemotron-70b-instruct | 128k tokens | Customized for enhanced helpfulness in responses |
|
|
||||||
| nvidia/llama3-chatqa-1.5-8b | 128k tokens | Advanced LLM to generate high-quality, context-aware responses for chatbots and search engines. |
|
|
||||||
| nvidia/llama3-chatqa-1.5-70b | 128k tokens | Advanced LLM to generate high-quality, context-aware responses for chatbots and search engines. |
|
|
||||||
| nvidia/vila | 128k tokens | Multi-modal vision-language model that understands text/img/video and creates informative responses |
|
|
||||||
| nvidia/neva-22 | 4,096 tokens | Multi-modal vision-language model that understands text/images and generates informative responses |
|
|
||||||
| nvidia/nemotron-mini-4b-instruct | 8,192 tokens | General-purpose tasks |
|
|
||||||
| nvidia/usdcode-llama3-70b-instruct | 128k tokens | State-of-the-art LLM that answers OpenUSD knowledge queries and generates USD-Python code. |
|
|
||||||
| nvidia/nemotron-4-340b-instruct | 4,096 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
|
||||||
| meta/codellama-70b | 100k tokens | LLM capable of generating code from natural language and vice versa. |
|
|
||||||
| meta/llama2-70b | 4,096 tokens | Cutting-edge large language AI model capable of generating text and code in response to prompts. |
|
|
||||||
| meta/llama3-8b-instruct | 8,192 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
|
|
||||||
| meta/llama3-70b-instruct | 8,192 tokens | Powers complex conversations with superior contextual understanding, reasoning and text generation. |
|
|
||||||
| meta/llama-3.1-8b-instruct | 128k tokens | Advanced state-of-the-art model with language understanding, superior reasoning, and text generation. |
|
|
||||||
| meta/llama-3.1-70b-instruct | 128k tokens | Powers complex conversations with superior contextual understanding, reasoning and text generation. |
|
|
||||||
| meta/llama-3.1-405b-instruct | 128k tokens | Advanced LLM for synthetic data generation, distillation, and inference for chatbots, coding, and domain-specific tasks. |
|
|
||||||
| meta/llama-3.2-1b-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
|
||||||
| meta/llama-3.2-3b-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
|
||||||
| meta/llama-3.2-11b-vision-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
|
||||||
| meta/llama-3.2-90b-vision-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
|
||||||
| google/gemma-7b | 8,192 tokens | Cutting-edge text generation model text understanding, transformation, and code generation. |
|
|
||||||
| google/gemma-2b | 8,192 tokens | Cutting-edge text generation model text understanding, transformation, and code generation. |
|
|
||||||
| google/codegemma-7b | 8,192 tokens | Cutting-edge model built on Google's Gemma-7B specialized for code generation and code completion. |
|
|
||||||
| google/codegemma-1.1-7b | 8,192 tokens | Advanced programming model for code generation, completion, reasoning, and instruction following. |
|
|
||||||
| google/recurrentgemma-2b | 8,192 tokens | Novel recurrent architecture based language model for faster inference when generating long sequences. |
|
|
||||||
| google/gemma-2-9b-it | 8,192 tokens | Cutting-edge text generation model text understanding, transformation, and code generation. |
|
|
||||||
| google/gemma-2-27b-it | 8,192 tokens | Cutting-edge text generation model text understanding, transformation, and code generation. |
|
|
||||||
| google/gemma-2-2b-it | 8,192 tokens | Cutting-edge text generation model text understanding, transformation, and code generation. |
|
|
||||||
| google/deplot | 512 tokens | One-shot visual language understanding model that translates images of plots into tables. |
|
|
||||||
| google/paligemma | 8,192 tokens | Vision language model adept at comprehending text and visual inputs to produce informative responses. |
|
|
||||||
| mistralai/mistral-7b-instruct-v0.2 | 32k tokens | This LLM follows instructions, completes requests, and generates creative text. |
|
|
||||||
| mistralai/mixtral-8x7b-instruct-v0.1 | 8,192 tokens | An MOE LLM that follows instructions, completes requests, and generates creative text. |
|
|
||||||
| mistralai/mistral-large | 4,096 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
|
||||||
| mistralai/mixtral-8x22b-instruct-v0.1 | 8,192 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
|
||||||
| mistralai/mistral-7b-instruct-v0.3 | 32k tokens | This LLM follows instructions, completes requests, and generates creative text. |
|
|
||||||
| nv-mistralai/mistral-nemo-12b-instruct | 128k tokens | Most advanced language model for reasoning, code, multilingual tasks; runs on a single GPU. |
|
|
||||||
| mistralai/mamba-codestral-7b-v0.1 | 256k tokens | Model for writing and interacting with code across a wide range of programming languages and tasks. |
|
|
||||||
| microsoft/phi-3-mini-128k-instruct | 128K tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
|
||||||
| microsoft/phi-3-mini-4k-instruct | 4,096 tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
|
||||||
| microsoft/phi-3-small-8k-instruct | 8,192 tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
|
||||||
| microsoft/phi-3-small-128k-instruct | 128K tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
|
||||||
| microsoft/phi-3-medium-4k-instruct | 4,096 tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
|
||||||
| microsoft/phi-3-medium-128k-instruct | 128K tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
|
||||||
| microsoft/phi-3.5-mini-instruct | 128K tokens | Lightweight multilingual LLM powering AI applications in latency bound, memory/compute constrained environments |
|
|
||||||
| microsoft/phi-3.5-moe-instruct                                           | 128K tokens    | Advanced LLM based on Mixture of Experts architecture to deliver compute efficient content generation    |
|
|
||||||
| microsoft/kosmos-2 | 1,024 tokens | Groundbreaking multimodal model designed to understand and reason about visual elements in images. |
|
|
||||||
| microsoft/phi-3-vision-128k-instruct                                     | 128k tokens    | Cutting-edge open multimodal model excelling in high-quality reasoning from images.                       |
|
|
||||||
| microsoft/phi-3.5-vision-instruct                                        | 128k tokens    | Cutting-edge open multimodal model excelling in high-quality reasoning from images.                       |
|
|
||||||
| databricks/dbrx-instruct | 12k tokens | A general-purpose LLM with state-of-the-art performance in language understanding, coding, and RAG. |
|
|
||||||
| snowflake/arctic | 1,024 tokens | Delivers high efficiency inference for enterprise applications focused on SQL generation and coding. |
|
|
||||||
| aisingapore/sea-lion-7b-instruct | 4,096 tokens | LLM to represent and serve the linguistic and cultural diversity of Southeast Asia |
|
|
||||||
| ibm/granite-8b-code-instruct | 4,096 tokens | Software programming LLM for code generation, completion, explanation, and multi-turn conversion. |
|
|
||||||
| ibm/granite-34b-code-instruct | 8,192 tokens | Software programming LLM for code generation, completion, explanation, and multi-turn conversion. |
|
|
||||||
| ibm/granite-3.0-8b-instruct | 4,096 tokens | Advanced Small Language Model supporting RAG, summarization, classification, code, and agentic AI |
|
|
||||||
| ibm/granite-3.0-3b-a800m-instruct | 4,096 tokens | Highly efficient Mixture of Experts model for RAG, summarization, entity extraction, and classification |
|
|
||||||
| mediatek/breeze-7b-instruct | 4,096 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
|
||||||
| upstage/solar-10.7b-instruct | 4,096 tokens | Excels in NLP tasks, particularly in instruction-following, reasoning, and mathematics. |
|
|
||||||
| writer/palmyra-med-70b-32k | 32k tokens | Leading LLM for accurate, contextually relevant responses in the medical domain. |
|
|
||||||
| writer/palmyra-med-70b | 32k tokens | Leading LLM for accurate, contextually relevant responses in the medical domain. |
|
|
||||||
| writer/palmyra-fin-70b-32k | 32k tokens | Specialized LLM for financial analysis, reporting, and data processing |
|
|
||||||
| 01-ai/yi-large | 32k tokens | Powerful model trained on English and Chinese for diverse tasks including chatbot and creative writing. |
|
|
||||||
| deepseek-ai/deepseek-coder-6.7b-instruct | 2k tokens | Powerful coding model offering advanced capabilities in code generation, completion, and infilling |
|
|
||||||
| rakuten/rakutenai-7b-instruct | 1,024 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
|
|
||||||
| rakuten/rakutenai-7b-chat | 1,024 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
|
|
||||||
| baichuan-inc/baichuan2-13b-chat | 4,096 tokens | Support Chinese and English chat, coding, math, instruction following, solving quizzes |
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Groq">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
|
|
||||||
```toml Code
|
|
||||||
GROQ_API_KEY=<your-api-key>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="groq/llama-3.2-90b-text-preview",
|
|
||||||
temperature=0.7
|
|
||||||
)
|
|
||||||
```
|
|
||||||
| Model | Context Window | Best For |
|
|
||||||
|-------------------|------------------|--------------------------------------------|
|
|
||||||
| Llama 3.1 70B/8B | 131,072 tokens | High-performance, large context tasks |
|
|
||||||
| Llama 3.2 Series | 8,192 tokens | General-purpose tasks |
|
|
||||||
| Mixtral 8x7B | 32,768 tokens | Balanced performance and context |
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="IBM watsonx.ai">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
```toml Code
|
|
||||||
# Required
|
|
||||||
WATSONX_URL=<your-url>
|
|
||||||
WATSONX_APIKEY=<your-apikey>
|
|
||||||
WATSONX_PROJECT_ID=<your-project-id>
|
|
||||||
|
|
||||||
# Optional
|
|
||||||
WATSONX_TOKEN=<your-token>
|
|
||||||
WATSONX_DEPLOYMENT_SPACE_ID=<your-space-id>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="watsonx/meta-llama/llama-3-1-70b-instruct",
|
|
||||||
base_url="https://api.watsonx.ai/v1"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Ollama (Local LLMs)">
|
|
||||||
1. Install Ollama: [ollama.ai](https://ollama.ai/)
|
|
||||||
2. Run a model: `ollama run llama2`
|
|
||||||
3. Configure:
|
|
||||||
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="ollama/llama3:70b",
|
|
||||||
base_url="http://localhost:11434"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Fireworks AI">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
```toml Code
|
|
||||||
FIREWORKS_API_KEY=<your-api-key>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct",
|
|
||||||
temperature=0.7
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Perplexity AI">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
```toml Code
|
|
||||||
PERPLEXITY_API_KEY=<your-api-key>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="llama-3.1-sonar-large-128k-online",
|
|
||||||
base_url="https://api.perplexity.ai/"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Hugging Face">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
```toml Code
|
|
||||||
HUGGINGFACE_API_KEY=<your-api-key>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct",
|
|
||||||
base_url="your_api_endpoint"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="SambaNova">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
|
|
||||||
```toml Code
|
|
||||||
SAMBANOVA_API_KEY=<your-api-key>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="sambanova/Meta-Llama-3.1-8B-Instruct",
|
|
||||||
temperature=0.7
|
|
||||||
)
|
|
||||||
```
|
|
||||||
| Model | Context Window | Best For |
|
|
||||||
|--------------------|------------------------|----------------------------------------------|
|
|
||||||
| Llama 3.1 70B/8B | Up to 131,072 tokens | High-performance, large context tasks |
|
|
||||||
| Llama 3.1 405B | 8,192 tokens | High-performance and output quality |
|
|
||||||
| Llama 3.2 Series | 8,192 tokens | General-purpose, multimodal tasks |
|
|
||||||
| Llama 3.3 70B | Up to 131,072 tokens | High-performance and output quality |
|
|
||||||
| Qwen2 family       | 8,192 tokens           | High-performance and output quality                      |
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Cerebras">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
```toml Code
|
|
||||||
# Required
|
|
||||||
CEREBRAS_API_KEY=<your-api-key>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="cerebras/llama3.1-70b",
|
|
||||||
temperature=0.7,
|
|
||||||
max_tokens=8192
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
<Info>
|
|
||||||
Cerebras features:
|
|
||||||
- Fast inference speeds
|
|
||||||
- Competitive pricing
|
|
||||||
- Good balance of speed and quality
|
|
||||||
- Support for long context windows
|
|
||||||
</Info>
|
|
||||||
</Accordion>
|
|
||||||
|
|
||||||
<Accordion title="Open Router">
|
|
||||||
Set the following environment variables in your `.env` file:
|
|
||||||
```toml Code
|
|
||||||
OPENROUTER_API_KEY=<your-api-key>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
|
||||||
```python Code
|
|
||||||
llm = LLM(
|
|
||||||
model="openrouter/deepseek/deepseek-r1",
|
|
||||||
base_url="https://openrouter.ai/api/v1",
|
|
||||||
api_key=OPENROUTER_API_KEY
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
<Info>
|
|
||||||
Open Router models:
|
|
||||||
- openrouter/deepseek/deepseek-r1
|
|
||||||
- openrouter/deepseek/deepseek-chat
|
|
||||||
</Info>
|
|
||||||
</Accordion>
|
|
||||||
</AccordionGroup>
|
|
||||||
|
|
||||||
## Structured LLM Calls
|
|
||||||
|
|
||||||
CrewAI supports structured responses from LLM calls by allowing you to define a `response_format` using a Pydantic model. This enables the framework to automatically parse and validate the output, making it easier to integrate the response into your application without manual post-processing.
|
|
||||||
|
|
||||||
For example, you can define a Pydantic model to represent the expected response structure and pass it as the `response_format` when instantiating the LLM. The model will then be used to convert the LLM output into a structured Python object.
|
|
||||||
|
|
||||||
```python Code
|
|
||||||
from crewai import LLM
|
|
||||||
|
|
||||||
class Dog(BaseModel):
|
|
||||||
name: str
|
|
||||||
age: int
|
|
||||||
breed: str
|
|
||||||
|
|
||||||
|
|
||||||
llm = LLM(model="gpt-4o", response_format=Dog)
|
|
||||||
|
|
||||||
response = llm.call(
|
|
||||||
"Analyze the following messages and return the name, age, and breed. "
|
|
||||||
"Meet Kona! She is 3 years old and is a black german shepherd."
|
|
||||||
)
|
|
||||||
print(response)
|
|
||||||
|
|
||||||
# Output:
|
|
||||||
# Dog(name='Kona', age=3, breed='black german shepherd')
|
|
||||||
```
|
|
||||||
|
|
||||||
## Advanced Features and Optimization
|
## Advanced Features and Optimization
|
||||||
|
|
||||||
Learn how to get the most out of your LLM configuration:
|
Learn how to get the most out of your LLM configuration:
|
||||||
@@ -635,6 +408,262 @@ Learn how to get the most out of your LLM configuration:
|
|||||||
</Accordion>
|
</Accordion>
|
||||||
</AccordionGroup>
|
</AccordionGroup>
|
||||||
|
|
||||||
|
## Provider Configuration Examples
|
||||||
|
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="OpenAI">
|
||||||
|
```python Code
|
||||||
|
# Required
|
||||||
|
OPENAI_API_KEY=sk-...
|
||||||
|
|
||||||
|
# Optional
|
||||||
|
OPENAI_API_BASE=<custom-base-url>
|
||||||
|
OPENAI_ORGANIZATION=<your-org-id>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
from crewai import LLM
|
||||||
|
|
||||||
|
llm = LLM(
|
||||||
|
model="gpt-4",
|
||||||
|
temperature=0.8,
|
||||||
|
max_tokens=150,
|
||||||
|
top_p=0.9,
|
||||||
|
frequency_penalty=0.1,
|
||||||
|
presence_penalty=0.1,
|
||||||
|
stop=["END"],
|
||||||
|
seed=42
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Anthropic">
|
||||||
|
```python Code
|
||||||
|
ANTHROPIC_API_KEY=sk-ant-...
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="anthropic/claude-3-sonnet-20240229-v1:0",
|
||||||
|
temperature=0.7
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Google">
|
||||||
|
```python Code
|
||||||
|
# Option 1. Gemini accessed with an API key.
|
||||||
|
# https://ai.google.dev/gemini-api/docs/api-key
|
||||||
|
GEMINI_API_KEY=<your-api-key>
|
||||||
|
|
||||||
|
# Option 2. Vertex AI IAM credentials for Gemini, Anthropic, and anything in the Model Garden.
|
||||||
|
# https://cloud.google.com/vertex-ai/generative-ai/docs/overview
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="gemini/gemini-1.5-pro-latest",
|
||||||
|
temperature=0.7
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Azure">
|
||||||
|
```python Code
|
||||||
|
# Required
|
||||||
|
AZURE_API_KEY=<your-api-key>
|
||||||
|
AZURE_API_BASE=<your-resource-url>
|
||||||
|
AZURE_API_VERSION=<api-version>
|
||||||
|
|
||||||
|
# Optional
|
||||||
|
AZURE_AD_TOKEN=<your-azure-ad-token>
|
||||||
|
AZURE_API_TYPE=<your-azure-api-type>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="azure/gpt-4",
|
||||||
|
api_version="2023-05-15"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="AWS Bedrock">
|
||||||
|
```python Code
|
||||||
|
AWS_ACCESS_KEY_ID=<your-access-key>
|
||||||
|
AWS_SECRET_ACCESS_KEY=<your-secret-key>
|
||||||
|
AWS_DEFAULT_REGION=<your-region>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Mistral">
|
||||||
|
```python Code
|
||||||
|
MISTRAL_API_KEY=<your-api-key>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="mistral/mistral-large-latest",
|
||||||
|
temperature=0.7
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Nvidia NIM">
|
||||||
|
```python Code
|
||||||
|
NVIDIA_API_KEY=<your-api-key>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="nvidia_nim/meta/llama3-70b-instruct",
|
||||||
|
temperature=0.7
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Groq">
|
||||||
|
```python Code
|
||||||
|
GROQ_API_KEY=<your-api-key>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="groq/llama-3.2-90b-text-preview",
|
||||||
|
temperature=0.7
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="IBM watsonx.ai">
|
||||||
|
```python Code
|
||||||
|
# Required
|
||||||
|
WATSONX_URL=<your-url>
|
||||||
|
WATSONX_APIKEY=<your-apikey>
|
||||||
|
WATSONX_PROJECT_ID=<your-project-id>
|
||||||
|
|
||||||
|
# Optional
|
||||||
|
WATSONX_TOKEN=<your-token>
|
||||||
|
WATSONX_DEPLOYMENT_SPACE_ID=<your-space-id>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="watsonx/meta-llama/llama-3-1-70b-instruct",
|
||||||
|
base_url="https://api.watsonx.ai/v1"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Ollama (Local LLMs)">
|
||||||
|
1. Install Ollama: [ollama.ai](https://ollama.ai/)
|
||||||
|
2. Run a model: `ollama run llama2`
|
||||||
|
3. Configure:
|
||||||
|
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="ollama/llama3:70b",
|
||||||
|
base_url="http://localhost:11434"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Fireworks AI">
|
||||||
|
```python Code
|
||||||
|
FIREWORKS_API_KEY=<your-api-key>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct",
|
||||||
|
temperature=0.7
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Perplexity AI">
|
||||||
|
```python Code
|
||||||
|
PERPLEXITY_API_KEY=<your-api-key>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="llama-3.1-sonar-large-128k-online",
|
||||||
|
base_url="https://api.perplexity.ai/"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Hugging Face">
|
||||||
|
```python Code
|
||||||
|
HUGGINGFACE_API_KEY=<your-api-key>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct",
|
||||||
|
base_url="your_api_endpoint"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="SambaNova">
|
||||||
|
```python Code
|
||||||
|
SAMBANOVA_API_KEY=<your-api-key>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="sambanova/Meta-Llama-3.1-8B-Instruct",
|
||||||
|
temperature=0.7
|
||||||
|
)
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Cerebras">
|
||||||
|
```python Code
|
||||||
|
# Required
|
||||||
|
CEREBRAS_API_KEY=<your-api-key>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
```python Code
|
||||||
|
llm = LLM(
|
||||||
|
model="cerebras/llama3.1-70b",
|
||||||
|
temperature=0.7,
|
||||||
|
max_tokens=8192
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
<Info>
|
||||||
|
Cerebras features:
|
||||||
|
- Fast inference speeds
|
||||||
|
- Competitive pricing
|
||||||
|
- Good balance of speed and quality
|
||||||
|
- Support for long context windows
|
||||||
|
</Info>
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
|
|
||||||
## Common Issues and Solutions
|
## Common Issues and Solutions
|
||||||
|
|
||||||
<Tabs>
|
<Tabs>
|
||||||
|
|||||||
@@ -58,107 +58,41 @@ my_crew = Crew(
|
|||||||
### Example: Use Custom Memory Instances e.g FAISS as the VectorDB
|
### Example: Use Custom Memory Instances e.g FAISS as the VectorDB
|
||||||
|
|
||||||
```python Code
|
```python Code
|
||||||
from crewai import Crew, Process
|
from crewai import Crew, Agent, Task, Process
|
||||||
from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
|
|
||||||
from crewai.memory.storage import LTMSQLiteStorage, RAGStorage
|
|
||||||
from typing import List, Optional
|
|
||||||
|
|
||||||
# Assemble your crew with memory capabilities
|
# Assemble your crew with memory capabilities
|
||||||
my_crew: Crew = Crew(
|
my_crew = Crew(
|
||||||
agents = [...],
|
agents=[...],
|
||||||
tasks = [...],
|
tasks=[...],
|
||||||
process = Process.sequential,
|
process="Process.sequential",
|
||||||
memory = True,
|
memory=True,
|
||||||
# Long-term memory for persistent storage across sessions
|
long_term_memory=EnhanceLongTermMemory(
|
||||||
long_term_memory = LongTermMemory(
|
|
||||||
storage=LTMSQLiteStorage(
|
storage=LTMSQLiteStorage(
|
||||||
db_path="/my_crew1/long_term_memory_storage.db"
|
db_path="/my_data_dir/my_crew1/long_term_memory_storage.db"
|
||||||
)
|
)
|
||||||
),
|
),
|
||||||
# Short-term memory for current context using RAG
|
short_term_memory=EnhanceShortTermMemory(
|
||||||
short_term_memory = ShortTermMemory(
|
storage=CustomRAGStorage(
|
||||||
storage = RAGStorage(
|
crew_name="my_crew",
|
||||||
embedder_config={
|
storage_type="short_term",
|
||||||
"provider": "openai",
|
data_dir="//my_data_dir",
|
||||||
"config": {
|
model=embedder["model"],
|
||||||
"model": 'text-embedding-3-small'
|
dimension=embedder["dimension"],
|
||||||
}
|
|
||||||
},
|
|
||||||
type="short_term",
|
|
||||||
path="/my_crew1/"
|
|
||||||
)
|
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
# Entity memory for tracking key information about entities
|
entity_memory=EnhanceEntityMemory(
|
||||||
entity_memory = EntityMemory(
|
storage=CustomRAGStorage(
|
||||||
storage=RAGStorage(
|
crew_name="my_crew",
|
||||||
embedder_config={
|
storage_type="entities",
|
||||||
"provider": "openai",
|
data_dir="//my_data_dir",
|
||||||
"config": {
|
model=embedder["model"],
|
||||||
"model": 'text-embedding-3-small'
|
dimension=embedder["dimension"],
|
||||||
}
|
),
|
||||||
},
|
|
||||||
type="short_term",
|
|
||||||
path="/my_crew1/"
|
|
||||||
)
|
|
||||||
),
|
),
|
||||||
verbose=True,
|
verbose=True,
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
## Security Considerations
|
|
||||||
|
|
||||||
When configuring memory storage:
|
|
||||||
- Use environment variables for storage paths (e.g., `CREWAI_STORAGE_DIR`)
|
|
||||||
- Never hardcode sensitive information like database credentials
|
|
||||||
- Consider access permissions for storage directories
|
|
||||||
- Use relative paths when possible to maintain portability
|
|
||||||
|
|
||||||
Example using environment variables:
|
|
||||||
```python
|
|
||||||
import os
|
|
||||||
from crewai import Crew
|
|
||||||
from crewai.memory import LongTermMemory
|
|
||||||
from crewai.memory.storage import LTMSQLiteStorage
|
|
||||||
|
|
||||||
# Configure storage path using environment variable
|
|
||||||
storage_path = os.getenv("CREWAI_STORAGE_DIR", "./storage")
|
|
||||||
crew = Crew(
|
|
||||||
memory=True,
|
|
||||||
long_term_memory=LongTermMemory(
|
|
||||||
storage=LTMSQLiteStorage(
|
|
||||||
db_path="{storage_path}/memory.db".format(storage_path=storage_path)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Configuration Examples
|
|
||||||
|
|
||||||
### Basic Memory Configuration
|
|
||||||
```python
|
|
||||||
from crewai import Crew
|
|
||||||
from crewai.memory import LongTermMemory
|
|
||||||
|
|
||||||
# Simple memory configuration
|
|
||||||
crew = Crew(memory=True) # Uses default storage locations
|
|
||||||
```
|
|
||||||
|
|
||||||
### Custom Storage Configuration
|
|
||||||
```python
|
|
||||||
from crewai import Crew
|
|
||||||
from crewai.memory import LongTermMemory
|
|
||||||
from crewai.memory.storage import LTMSQLiteStorage
|
|
||||||
|
|
||||||
# Configure custom storage paths
|
|
||||||
crew = Crew(
|
|
||||||
memory=True,
|
|
||||||
long_term_memory=LongTermMemory(
|
|
||||||
storage=LTMSQLiteStorage(db_path="./memory.db")
|
|
||||||
)
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Integrating Mem0 for Enhanced User Memory
|
## Integrating Mem0 for Enhanced User Memory
|
||||||
|
|
||||||
[Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.
|
[Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.
|
||||||
@@ -251,12 +185,7 @@ my_crew = Crew(
|
|||||||
process=Process.sequential,
|
process=Process.sequential,
|
||||||
memory=True,
|
memory=True,
|
||||||
verbose=True,
|
verbose=True,
|
||||||
embedder={
|
embedder=OpenAIEmbeddingFunction(api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"),
|
||||||
"provider": "openai",
|
|
||||||
"config": {
|
|
||||||
"model": 'text-embedding-3-small'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -282,19 +211,6 @@ my_crew = Crew(
|
|||||||
|
|
||||||
### Using Google AI embeddings
|
### Using Google AI embeddings
|
||||||
|
|
||||||
#### Prerequisites
|
|
||||||
Before using Google AI embeddings, ensure you have:
|
|
||||||
- Access to the Gemini API
|
|
||||||
- The necessary API keys and permissions
|
|
||||||
|
|
||||||
You will need to update your *pyproject.toml* dependencies:
|
|
||||||
```YAML
|
|
||||||
dependencies = [
|
|
||||||
"google-generativeai>=0.8.4", #main version in January/2025 - crewai v.0.100.0 and crewai-tools 0.33.0
|
|
||||||
"crewai[tools]>=0.100.0,<1.0.0"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
```python Code
|
```python Code
|
||||||
from crewai import Crew, Agent, Task, Process
|
from crewai import Crew, Agent, Task, Process
|
||||||
|
|
||||||
@@ -308,7 +224,7 @@ my_crew = Crew(
|
|||||||
"provider": "google",
|
"provider": "google",
|
||||||
"config": {
|
"config": {
|
||||||
"api_key": "<YOUR_API_KEY>",
|
"api_key": "<YOUR_API_KEY>",
|
||||||
"model": "<model_name>"
|
"model_name": "<model_name>"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@@ -326,15 +242,13 @@ my_crew = Crew(
|
|||||||
process=Process.sequential,
|
process=Process.sequential,
|
||||||
memory=True,
|
memory=True,
|
||||||
verbose=True,
|
verbose=True,
|
||||||
embedder={
|
embedder=OpenAIEmbeddingFunction(
|
||||||
"provider": "openai",
|
api_key="YOUR_API_KEY",
|
||||||
"config": {
|
api_base="YOUR_API_BASE_PATH",
|
||||||
"api_key": "YOUR_API_KEY",
|
api_type="azure",
|
||||||
"api_base": "YOUR_API_BASE_PATH",
|
api_version="YOUR_API_VERSION",
|
||||||
"api_version": "YOUR_API_VERSION",
|
model_name="text-embedding-3-small"
|
||||||
"model_name": 'text-embedding-3-small'
|
)
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -350,15 +264,12 @@ my_crew = Crew(
|
|||||||
process=Process.sequential,
|
process=Process.sequential,
|
||||||
memory=True,
|
memory=True,
|
||||||
verbose=True,
|
verbose=True,
|
||||||
embedder={
|
embedder=GoogleVertexEmbeddingFunction(
|
||||||
"provider": "vertexai",
|
project_id="YOUR_PROJECT_ID",
|
||||||
"config": {
|
region="YOUR_REGION",
|
||||||
"project_id"="YOUR_PROJECT_ID",
|
api_key="YOUR_API_KEY",
|
||||||
"region"="YOUR_REGION",
|
model_name="textembedding-gecko"
|
||||||
"api_key"="YOUR_API_KEY",
|
)
|
||||||
"model_name"="textembedding-gecko"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -377,27 +288,7 @@ my_crew = Crew(
|
|||||||
"provider": "cohere",
|
"provider": "cohere",
|
||||||
"config": {
|
"config": {
|
||||||
"api_key": "YOUR_API_KEY",
|
"api_key": "YOUR_API_KEY",
|
||||||
"model": "<model_name>"
|
"model_name": "<model_name>"
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
```
|
|
||||||
### Using VoyageAI embeddings
|
|
||||||
|
|
||||||
```python Code
|
|
||||||
from crewai import Crew, Agent, Task, Process
|
|
||||||
|
|
||||||
my_crew = Crew(
|
|
||||||
agents=[...],
|
|
||||||
tasks=[...],
|
|
||||||
process=Process.sequential,
|
|
||||||
memory=True,
|
|
||||||
verbose=True,
|
|
||||||
embedder={
|
|
||||||
"provider": "voyageai",
|
|
||||||
"config": {
|
|
||||||
"api_key": "YOUR_API_KEY",
|
|
||||||
"model": "<model_name>"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@@ -447,66 +338,7 @@ my_crew = Crew(
|
|||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
### Using Amazon Bedrock embeddings
|
### Resetting Memory
|
||||||
|
|
||||||
```python Code
|
|
||||||
# Note: Ensure you have installed `boto3` for Bedrock embeddings to work.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import boto3
|
|
||||||
from crewai import Crew, Agent, Task, Process
|
|
||||||
|
|
||||||
boto3_session = boto3.Session(
|
|
||||||
region_name=os.environ.get("AWS_REGION_NAME"),
|
|
||||||
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
|
|
||||||
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY")
|
|
||||||
)
|
|
||||||
|
|
||||||
my_crew = Crew(
|
|
||||||
agents=[...],
|
|
||||||
tasks=[...],
|
|
||||||
process=Process.sequential,
|
|
||||||
memory=True,
|
|
||||||
embedder={
|
|
||||||
"provider": "bedrock",
|
|
||||||
"config":{
|
|
||||||
"session": boto3_session,
|
|
||||||
"model": "amazon.titan-embed-text-v2:0",
|
|
||||||
"vector_dimension": 1024
|
|
||||||
}
|
|
||||||
}
|
|
||||||
verbose=True
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Adding Custom Embedding Function
|
|
||||||
|
|
||||||
```python Code
|
|
||||||
from crewai import Crew, Agent, Task, Process
|
|
||||||
from chromadb import Documents, EmbeddingFunction, Embeddings
|
|
||||||
|
|
||||||
# Create a custom embedding function
|
|
||||||
class CustomEmbedder(EmbeddingFunction):
|
|
||||||
def __call__(self, input: Documents) -> Embeddings:
|
|
||||||
# generate embeddings
|
|
||||||
return [1, 2, 3] # this is a dummy embedding
|
|
||||||
|
|
||||||
my_crew = Crew(
|
|
||||||
agents=[...],
|
|
||||||
tasks=[...],
|
|
||||||
process=Process.sequential,
|
|
||||||
memory=True,
|
|
||||||
verbose=True,
|
|
||||||
embedder={
|
|
||||||
"provider": "custom",
|
|
||||||
"config": {
|
|
||||||
"embedder": CustomEmbedder()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Resetting Memory via cli
|
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
crewai reset-memories [OPTIONS]
|
crewai reset-memories [OPTIONS]
|
||||||
@@ -520,46 +352,8 @@ crewai reset-memories [OPTIONS]
|
|||||||
| `-s`, `--short` | Reset SHORT TERM memory. | Flag (boolean) | False |
|
| `-s`, `--short` | Reset SHORT TERM memory. | Flag (boolean) | False |
|
||||||
| `-e`, `--entities` | Reset ENTITIES memory. | Flag (boolean) | False |
|
| `-e`, `--entities` | Reset ENTITIES memory. | Flag (boolean) | False |
|
||||||
| `-k`, `--kickoff-outputs` | Reset LATEST KICKOFF TASK OUTPUTS. | Flag (boolean) | False |
|
| `-k`, `--kickoff-outputs` | Reset LATEST KICKOFF TASK OUTPUTS. | Flag (boolean) | False |
|
||||||
| `-kn`, `--knowledge` | Reset KNOWLEDEGE storage | Flag (boolean) | False |
|
|
||||||
| `-a`, `--all` | Reset ALL memories. | Flag (boolean) | False |
|
| `-a`, `--all` | Reset ALL memories. | Flag (boolean) | False |
|
||||||
|
|
||||||
Note: To use the cli command you need to have your crew in a file called crew.py in the same directory.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### Resetting Memory via crew object
|
|
||||||
|
|
||||||
```python
|
|
||||||
|
|
||||||
my_crew = Crew(
|
|
||||||
agents=[...],
|
|
||||||
tasks=[...],
|
|
||||||
process=Process.sequential,
|
|
||||||
memory=True,
|
|
||||||
verbose=True,
|
|
||||||
embedder={
|
|
||||||
"provider": "custom",
|
|
||||||
"config": {
|
|
||||||
"embedder": CustomEmbedder()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
my_crew.reset_memories(command_type = 'all') # Resets all the memory
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Resetting Memory Options
|
|
||||||
|
|
||||||
| Command Type | Description |
|
|
||||||
| :----------------- | :------------------------------- |
|
|
||||||
| `long` | Reset LONG TERM memory. |
|
|
||||||
| `short` | Reset SHORT TERM memory. |
|
|
||||||
| `entities` | Reset ENTITIES memory. |
|
|
||||||
| `kickoff_outputs` | Reset LATEST KICKOFF TASK OUTPUTS. |
|
|
||||||
| `knowledge` | Reset KNOWLEDGE memory. |
|
|
||||||
| `all` | Reset ALL memories. |
|
|
||||||
|
|
||||||
|
|
||||||
## Benefits of Using CrewAI's Memory System
|
## Benefits of Using CrewAI's Memory System
|
||||||
|
|
||||||
|
|||||||
@@ -81,8 +81,8 @@ my_crew.kickoff()
|
|||||||
|
|
||||||
3. **Collect Data:**
|
3. **Collect Data:**
|
||||||
|
|
||||||
- Search for the latest papers, articles, and reports published in 2024 and early 2025.
|
- Search for the latest papers, articles, and reports published in 2023 and early 2024.
|
||||||
- Use keywords like "Large Language Models 2025", "AI LLM advancements", "AI ethics 2025", etc.
|
- Use keywords like "Large Language Models 2024", "AI LLM advancements", "AI ethics 2024", etc.
|
||||||
|
|
||||||
4. **Analyze Findings:**
|
4. **Analyze Findings:**
|
||||||
|
|
||||||
|
|||||||
@@ -33,12 +33,11 @@ crew = Crew(
|
|||||||
| :------------------------------- | :---------------- | :---------------------------- | :------------------------------------------------------------------------------------------------------------------- |
|
| :------------------------------- | :---------------- | :---------------------------- | :------------------------------------------------------------------------------------------------------------------- |
|
||||||
| **Description** | `description` | `str` | A clear, concise statement of what the task entails. |
|
| **Description** | `description` | `str` | A clear, concise statement of what the task entails. |
|
||||||
| **Expected Output** | `expected_output` | `str` | A detailed description of what the task's completion looks like. |
|
| **Expected Output** | `expected_output` | `str` | A detailed description of what the task's completion looks like. |
|
||||||
| **Name** _(optional)_ | `name` | `Optional[str]` | A name identifier for the task. |
|
| **Name** _(optional)_ | `name` | `Optional[str]` | A name identifier for the task. |
|
||||||
| **Agent** _(optional)_ | `agent` | `Optional[BaseAgent]` | The agent responsible for executing the task. |
|
| **Agent** _(optional)_ | `agent` | `Optional[BaseAgent]` | The agent responsible for executing the task. |
|
||||||
| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | The tools/resources the agent is limited to use for this task. |
|
| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | The tools/resources the agent is limited to use for this task. |
|
||||||
| **Context** _(optional)_ | `context` | `Optional[List["Task"]]` | Other tasks whose outputs will be used as context for this task. |
|
| **Context** _(optional)_ | `context` | `Optional[List["Task"]]` | Other tasks whose outputs will be used as context for this task. |
|
||||||
| **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]` | Whether the task should be executed asynchronously. Defaults to False. |
|
| **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]` | Whether the task should be executed asynchronously. Defaults to False. |
|
||||||
| **Human Input** _(optional)_ | `human_input` | `Optional[bool]` | Whether the task should have a human review the final answer of the agent. Defaults to False. |
|
|
||||||
| **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. |
|
| **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. |
|
||||||
| **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. |
|
| **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. |
|
||||||
| **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. |
|
| **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. |
|
||||||
@@ -69,7 +68,7 @@ research_task:
|
|||||||
description: >
|
description: >
|
||||||
Conduct a thorough research about {topic}
|
Conduct a thorough research about {topic}
|
||||||
Make sure you find any interesting and relevant information given
|
Make sure you find any interesting and relevant information given
|
||||||
the current year is 2025.
|
the current year is 2024.
|
||||||
expected_output: >
|
expected_output: >
|
||||||
A list with 10 bullet points of the most relevant information about {topic}
|
A list with 10 bullet points of the most relevant information about {topic}
|
||||||
agent: researcher
|
agent: researcher
|
||||||
@@ -155,7 +154,7 @@ research_task = Task(
|
|||||||
description="""
|
description="""
|
||||||
Conduct a thorough research about AI Agents.
|
Conduct a thorough research about AI Agents.
|
||||||
Make sure you find any interesting and relevant information given
|
Make sure you find any interesting and relevant information given
|
||||||
the current year is 2025.
|
the current year is 2024.
|
||||||
""",
|
""",
|
||||||
expected_output="""
|
expected_output="""
|
||||||
A list with 10 bullet points of the most relevant information about AI Agents
|
A list with 10 bullet points of the most relevant information about AI Agents
|
||||||
@@ -268,7 +267,7 @@ analysis_task = Task(
|
|||||||
|
|
||||||
Task guardrails provide a way to validate and transform task outputs before they
|
Task guardrails provide a way to validate and transform task outputs before they
|
||||||
are passed to the next task. This feature helps ensure data quality and provides
|
are passed to the next task. This feature helps ensure data quality and provides
|
||||||
feedback to agents when their output doesn't meet specific criteria.
|
efeedback to agents when their output doesn't meet specific criteria.
|
||||||
|
|
||||||
### Using Task Guardrails
|
### Using Task Guardrails
|
||||||
|
|
||||||
|
|||||||
@@ -60,12 +60,12 @@ writer = Agent(
|
|||||||
# Create tasks for your agents
|
# Create tasks for your agents
|
||||||
task1 = Task(
|
task1 = Task(
|
||||||
description=(
|
description=(
|
||||||
"Conduct a comprehensive analysis of the latest advancements in AI in 2025. "
|
"Conduct a comprehensive analysis of the latest advancements in AI in 2024. "
|
||||||
"Identify key trends, breakthrough technologies, and potential industry impacts. "
|
"Identify key trends, breakthrough technologies, and potential industry impacts. "
|
||||||
"Compile your findings in a detailed report. "
|
"Compile your findings in a detailed report. "
|
||||||
"Make sure to check with a human if the draft is good before finalizing your answer."
|
"Make sure to check with a human if the draft is good before finalizing your answer."
|
||||||
),
|
),
|
||||||
expected_output='A comprehensive full report on the latest AI advancements in 2025, leave nothing out',
|
expected_output='A comprehensive full report on the latest AI advancements in 2024, leave nothing out',
|
||||||
agent=researcher,
|
agent=researcher,
|
||||||
human_input=True
|
human_input=True
|
||||||
)
|
)
|
||||||
@@ -76,7 +76,7 @@ task2 = Task(
|
|||||||
"Your post should be informative yet accessible, catering to a tech-savvy audience. "
|
"Your post should be informative yet accessible, catering to a tech-savvy audience. "
|
||||||
"Aim for a narrative that captures the essence of these breakthroughs and their implications for the future."
|
"Aim for a narrative that captures the essence of these breakthroughs and their implications for the future."
|
||||||
),
|
),
|
||||||
expected_output='A compelling 3 paragraphs blog post formatted as markdown about the latest AI advancements in 2025',
|
expected_output='A compelling 3 paragraphs blog post formatted as markdown about the latest AI advancements in 2024',
|
||||||
agent=writer,
|
agent=writer,
|
||||||
human_input=True
|
human_input=True
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -54,8 +54,7 @@ coding_agent = Agent(
|
|||||||
# Create a task that requires code execution
|
# Create a task that requires code execution
|
||||||
data_analysis_task = Task(
|
data_analysis_task = Task(
|
||||||
description="Analyze the given dataset and calculate the average age of participants. Ages: {ages}",
|
description="Analyze the given dataset and calculate the average age of participants. Ages: {ages}",
|
||||||
agent=coding_agent,
|
agent=coding_agent
|
||||||
expected_output="The average age of the participants."
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Create a crew and add the task
|
# Create a crew and add the task
|
||||||
@@ -117,4 +116,4 @@ async def async_multiple_crews():
|
|||||||
|
|
||||||
# Run the async function
|
# Run the async function
|
||||||
asyncio.run(async_multiple_crews())
|
asyncio.run(async_multiple_crews())
|
||||||
```
|
```
|
||||||
@@ -1,100 +0,0 @@
|
|||||||
---
|
|
||||||
title: Agent Monitoring with Langfuse
|
|
||||||
description: Learn how to integrate Langfuse with CrewAI via OpenTelemetry using OpenLit
|
|
||||||
icon: magnifying-glass-chart
|
|
||||||
---
|
|
||||||
|
|
||||||
# Integrate Langfuse with CrewAI
|
|
||||||
|
|
||||||
This notebook demonstrates how to integrate **Langfuse** with **CrewAI** using OpenTelemetry via the **OpenLit** SDK. By the end of this notebook, you will be able to trace your CrewAI applications with Langfuse for improved observability and debugging.
|
|
||||||
|
|
||||||
> **What is Langfuse?** [Langfuse](https://langfuse.com) is an open-source LLM engineering platform. It provides tracing and monitoring capabilities for LLM applications, helping developers debug, analyze, and optimize their AI systems. Langfuse integrates with various tools and frameworks via native integrations, OpenTelemetry, and APIs/SDKs.
|
|
||||||
|
|
||||||
[](https://langfuse.com/watch-demo)
|
|
||||||
|
|
||||||
## Get Started
|
|
||||||
|
|
||||||
We'll walk through a simple example of using CrewAI and integrating it with Langfuse via OpenTelemetry using OpenLit.
|
|
||||||
|
|
||||||
### Step 1: Install Dependencies
|
|
||||||
|
|
||||||
|
|
||||||
```python
|
|
||||||
%pip install langfuse openlit crewai crewai_tools
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 2: Set Up Environment Variables
|
|
||||||
|
|
||||||
Set your Langfuse API keys and configure OpenTelemetry export settings to send traces to Langfuse. Please refer to the [Langfuse OpenTelemetry Docs](https://langfuse.com/docs/opentelemetry/get-started) for more information on the Langfuse OpenTelemetry endpoint `/api/public/otel` and authentication.
|
|
||||||
|
|
||||||
|
|
||||||
```python
|
|
||||||
import os
|
|
||||||
import base64
|
|
||||||
|
|
||||||
LANGFUSE_PUBLIC_KEY="pk-lf-..."
|
|
||||||
LANGFUSE_SECRET_KEY="sk-lf-..."
|
|
||||||
LANGFUSE_AUTH=base64.b64encode(f"{LANGFUSE_PUBLIC_KEY}:{LANGFUSE_SECRET_KEY}".encode()).decode()
|
|
||||||
|
|
||||||
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://cloud.langfuse.com/api/public/otel" # EU data region
|
|
||||||
# os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://us.cloud.langfuse.com/api/public/otel" # US data region
|
|
||||||
os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {LANGFUSE_AUTH}"
|
|
||||||
|
|
||||||
# your openai key
|
|
||||||
os.environ["OPENAI_API_KEY"] = "sk-..."
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 3: Initialize OpenLit
|
|
||||||
|
|
||||||
Initialize the OpenLit OpenTelemetry instrumentation SDK to start capturing OpenTelemetry traces.
|
|
||||||
|
|
||||||
|
|
||||||
```python
|
|
||||||
import openlit
|
|
||||||
|
|
||||||
openlit.init()
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 4: Create a Simple CrewAI Application
|
|
||||||
|
|
||||||
We'll create a simple CrewAI application where multiple agents collaborate to answer a user's question.
|
|
||||||
|
|
||||||
|
|
||||||
```python
|
|
||||||
from crewai import Agent, Task, Crew
|
|
||||||
|
|
||||||
from crewai_tools import (
|
|
||||||
WebsiteSearchTool
|
|
||||||
)
|
|
||||||
|
|
||||||
web_rag_tool = WebsiteSearchTool()
|
|
||||||
|
|
||||||
writer = Agent(
|
|
||||||
role="Writer",
|
|
||||||
goal="You make math engaging and understandable for young children through poetry",
|
|
||||||
backstory="You're an expert in writing haikus but you know nothing of math.",
|
|
||||||
tools=[web_rag_tool],
|
|
||||||
)
|
|
||||||
|
|
||||||
task = Task(description=("What is {multiplication}?"),
|
|
||||||
expected_output=("Compose a haiku that includes the answer."),
|
|
||||||
agent=writer)
|
|
||||||
|
|
||||||
crew = Crew(
|
|
||||||
agents=[writer],
|
|
||||||
tasks=[task],
|
|
||||||
share_crew=False
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 5: See Traces in Langfuse
|
|
||||||
|
|
||||||
After running the agent, you can view the traces generated by your CrewAI application in [Langfuse](https://cloud.langfuse.com). You should see detailed steps of the LLM interactions, which can help you debug and optimize your AI agent.
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
_[Public example trace in Langfuse](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/e2cf380ffc8d47d28da98f136140642b?timestamp=2025-02-05T15%3A12%3A02.717Z&observation=3b32338ee6a5d9af)_
|
|
||||||
|
|
||||||
## References
|
|
||||||
|
|
||||||
- [Langfuse OpenTelemetry Docs](https://langfuse.com/docs/opentelemetry/get-started)
|
|
||||||
@@ -23,7 +23,6 @@ LiteLLM supports a wide range of providers, including but not limited to:
|
|||||||
- Azure OpenAI
|
- Azure OpenAI
|
||||||
- AWS (Bedrock, SageMaker)
|
- AWS (Bedrock, SageMaker)
|
||||||
- Cohere
|
- Cohere
|
||||||
- VoyageAI
|
|
||||||
- Hugging Face
|
- Hugging Face
|
||||||
- Ollama
|
- Ollama
|
||||||
- Mistral AI
|
- Mistral AI
|
||||||
|
|||||||
@@ -1,206 +0,0 @@
|
|||||||
---
|
|
||||||
title: Agent Monitoring with MLflow
|
|
||||||
description: Quickly start monitoring your Agents with MLflow.
|
|
||||||
icon: bars-staggered
|
|
||||||
---
|
|
||||||
|
|
||||||
# MLflow Overview
|
|
||||||
|
|
||||||
[MLflow](https://mlflow.org/) is an open-source platform to assist machine learning practitioners and teams in handling the complexities of the machine learning process.
|
|
||||||
|
|
||||||
It provides a tracing feature that enhances LLM observability in your Generative AI applications by capturing detailed information about the execution of your application’s services.
|
|
||||||
Tracing provides a way to record the inputs, outputs, and metadata associated with each intermediate step of a request, enabling you to easily pinpoint the source of bugs and unexpected behaviors.
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
- **Tracing Dashboard**: Monitor activities of your crewAI agents with detailed dashboards that include inputs, outputs and metadata of spans.
|
|
||||||
- **Automated Tracing**: A fully automated integration with crewAI, which can be enabled by running `mlflow.crewai.autolog()`.
|
|
||||||
- **Manual Trace Instrumentation with minor efforts**: Customize trace instrumentation through MLflow's high-level fluent APIs such as decorators, function wrappers and context managers.
|
|
||||||
- **OpenTelemetry Compatibility**: MLflow Tracing supports exporting traces to an OpenTelemetry Collector, which can then be used to export traces to various backends such as Jaeger, Zipkin, and AWS X-Ray.
|
|
||||||
- **Package and Deploy Agents**: Package and deploy your crewAI agents to an inference server with a variety of deployment targets.
|
|
||||||
- **Securely Host LLMs**: Host multiple LLM from various providers in one unified endpoint through MFflow gateway.
|
|
||||||
- **Evaluation**: Evaluate your crewAI agents with a wide range of metrics using a convenient API `mlflow.evaluate()`.
|
|
||||||
|
|
||||||
## Setup Instructions
|
|
||||||
|
|
||||||
<Steps>
|
|
||||||
<Step title="Install MLflow package">
|
|
||||||
```shell
|
|
||||||
# The crewAI integration is available in mlflow>=2.19.0
|
|
||||||
pip install mlflow
|
|
||||||
```
|
|
||||||
</Step>
|
|
||||||
<Step title="Start MFflow tracking server">
|
|
||||||
```shell
|
|
||||||
# This process is optional, but it is recommended to use MLflow tracking server for better visualization and broader features.
|
|
||||||
mlflow server
|
|
||||||
```
|
|
||||||
</Step>
|
|
||||||
<Step title="Initialize MLflow in Your Application">
|
|
||||||
Add the following two lines to your application code:
|
|
||||||
|
|
||||||
```python
|
|
||||||
import mlflow
|
|
||||||
|
|
||||||
mlflow.crewai.autolog()
|
|
||||||
|
|
||||||
# Optional: Set a tracking URI and an experiment name if you have a tracking server
|
|
||||||
mlflow.set_tracking_uri("http://localhost:5000")
|
|
||||||
mlflow.set_experiment("CrewAI")
|
|
||||||
```
|
|
||||||
|
|
||||||
Example Usage for tracing CrewAI Agents:
|
|
||||||
|
|
||||||
```python
|
|
||||||
from crewai import Agent, Crew, Task
|
|
||||||
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
|
|
||||||
from crewai_tools import SerperDevTool, WebsiteSearchTool
|
|
||||||
|
|
||||||
from textwrap import dedent
|
|
||||||
|
|
||||||
content = "Users name is John. He is 30 years old and lives in San Francisco."
|
|
||||||
string_source = StringKnowledgeSource(
|
|
||||||
content=content, metadata={"preference": "personal"}
|
|
||||||
)
|
|
||||||
|
|
||||||
search_tool = WebsiteSearchTool()
|
|
||||||
|
|
||||||
|
|
||||||
class TripAgents:
|
|
||||||
def city_selection_agent(self):
|
|
||||||
return Agent(
|
|
||||||
role="City Selection Expert",
|
|
||||||
goal="Select the best city based on weather, season, and prices",
|
|
||||||
backstory="An expert in analyzing travel data to pick ideal destinations",
|
|
||||||
tools=[
|
|
||||||
search_tool,
|
|
||||||
],
|
|
||||||
verbose=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
def local_expert(self):
|
|
||||||
return Agent(
|
|
||||||
role="Local Expert at this city",
|
|
||||||
goal="Provide the BEST insights about the selected city",
|
|
||||||
backstory="""A knowledgeable local guide with extensive information
|
|
||||||
about the city, it's attractions and customs""",
|
|
||||||
tools=[search_tool],
|
|
||||||
verbose=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class TripTasks:
|
|
||||||
def identify_task(self, agent, origin, cities, interests, range):
|
|
||||||
return Task(
|
|
||||||
description=dedent(
|
|
||||||
f"""
|
|
||||||
Analyze and select the best city for the trip based
|
|
||||||
on specific criteria such as weather patterns, seasonal
|
|
||||||
events, and travel costs. This task involves comparing
|
|
||||||
multiple cities, considering factors like current weather
|
|
||||||
conditions, upcoming cultural or seasonal events, and
|
|
||||||
overall travel expenses.
|
|
||||||
Your final answer must be a detailed
|
|
||||||
report on the chosen city, and everything you found out
|
|
||||||
about it, including the actual flight costs, weather
|
|
||||||
forecast and attractions.
|
|
||||||
|
|
||||||
Traveling from: {origin}
|
|
||||||
City Options: {cities}
|
|
||||||
Trip Date: {range}
|
|
||||||
Traveler Interests: {interests}
|
|
||||||
"""
|
|
||||||
),
|
|
||||||
agent=agent,
|
|
||||||
expected_output="Detailed report on the chosen city including flight costs, weather forecast, and attractions",
|
|
||||||
)
|
|
||||||
|
|
||||||
def gather_task(self, agent, origin, interests, range):
|
|
||||||
return Task(
|
|
||||||
description=dedent(
|
|
||||||
f"""
|
|
||||||
As a local expert on this city you must compile an
|
|
||||||
in-depth guide for someone traveling there and wanting
|
|
||||||
to have THE BEST trip ever!
|
|
||||||
Gather information about key attractions, local customs,
|
|
||||||
special events, and daily activity recommendations.
|
|
||||||
Find the best spots to go to, the kind of place only a
|
|
||||||
local would know.
|
|
||||||
This guide should provide a thorough overview of what
|
|
||||||
the city has to offer, including hidden gems, cultural
|
|
||||||
hotspots, must-visit landmarks, weather forecasts, and
|
|
||||||
high level costs.
|
|
||||||
The final answer must be a comprehensive city guide,
|
|
||||||
rich in cultural insights and practical tips,
|
|
||||||
tailored to enhance the travel experience.
|
|
||||||
|
|
||||||
Trip Date: {range}
|
|
||||||
Traveling from: {origin}
|
|
||||||
Traveler Interests: {interests}
|
|
||||||
"""
|
|
||||||
),
|
|
||||||
agent=agent,
|
|
||||||
expected_output="Comprehensive city guide including hidden gems, cultural hotspots, and practical travel tips",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class TripCrew:
|
|
||||||
def __init__(self, origin, cities, date_range, interests):
|
|
||||||
self.cities = cities
|
|
||||||
self.origin = origin
|
|
||||||
self.interests = interests
|
|
||||||
self.date_range = date_range
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
agents = TripAgents()
|
|
||||||
tasks = TripTasks()
|
|
||||||
|
|
||||||
city_selector_agent = agents.city_selection_agent()
|
|
||||||
local_expert_agent = agents.local_expert()
|
|
||||||
|
|
||||||
identify_task = tasks.identify_task(
|
|
||||||
city_selector_agent,
|
|
||||||
self.origin,
|
|
||||||
self.cities,
|
|
||||||
self.interests,
|
|
||||||
self.date_range,
|
|
||||||
)
|
|
||||||
gather_task = tasks.gather_task(
|
|
||||||
local_expert_agent, self.origin, self.interests, self.date_range
|
|
||||||
)
|
|
||||||
|
|
||||||
crew = Crew(
|
|
||||||
agents=[city_selector_agent, local_expert_agent],
|
|
||||||
tasks=[identify_task, gather_task],
|
|
||||||
verbose=True,
|
|
||||||
memory=True,
|
|
||||||
knowledge={
|
|
||||||
"sources": [string_source],
|
|
||||||
"metadata": {"preference": "personal"},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
result = crew.kickoff()
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
trip_crew = TripCrew("California", "Tokyo", "Dec 12 - Dec 20", "sports")
|
|
||||||
result = trip_crew.run()
|
|
||||||
|
|
||||||
print(result)
|
|
||||||
```
|
|
||||||
Refer to [MLflow Tracing Documentation](https://mlflow.org/docs/latest/llms/tracing/index.html) for more configurations and use cases.
|
|
||||||
</Step>
|
|
||||||
<Step title="Visualize Activities of Agents">
|
|
||||||
Now traces for your crewAI agents are captured by MLflow.
|
|
||||||
Let's visit MLflow tracking server to view the traces and get insights into your Agents.
|
|
||||||
|
|
||||||
Open `127.0.0.1:5000` on your browser to visit MLflow tracking server.
|
|
||||||
<Frame caption="MLflow Tracing Dashboard">
|
|
||||||
<img src="/images/mlflow1.png" alt="MLflow tracing example with crewai" />
|
|
||||||
</Frame>
|
|
||||||
</Step>
|
|
||||||
</Steps>
|
|
||||||
|
|
||||||
@@ -1,14 +1,14 @@
|
|||||||
---
|
---
|
||||||
title: Using Multimodal Agents
|
title: Using Multimodal Agents
|
||||||
description: Learn how to enable and use multimodal capabilities in your agents for processing images and other non-text content within the CrewAI framework.
|
description: Learn how to enable and use multimodal capabilities in your agents for processing images and other non-text content within the CrewAI framework.
|
||||||
icon: video
|
icon: image
|
||||||
---
|
---
|
||||||
|
|
||||||
## Using Multimodal Agents
|
# Using Multimodal Agents
|
||||||
|
|
||||||
CrewAI supports multimodal agents that can process both text and non-text content like images. This guide will show you how to enable and use multimodal capabilities in your agents.
|
CrewAI supports multimodal agents that can process both text and non-text content like images. This guide will show you how to enable and use multimodal capabilities in your agents.
|
||||||
|
|
||||||
### Enabling Multimodal Capabilities
|
## Enabling Multimodal Capabilities
|
||||||
|
|
||||||
To create a multimodal agent, simply set the `multimodal` parameter to `True` when initializing your agent:
|
To create a multimodal agent, simply set the `multimodal` parameter to `True` when initializing your agent:
|
||||||
|
|
||||||
@@ -25,7 +25,7 @@ agent = Agent(
|
|||||||
|
|
||||||
When you set `multimodal=True`, the agent is automatically configured with the necessary tools for handling non-text content, including the `AddImageTool`.
|
When you set `multimodal=True`, the agent is automatically configured with the necessary tools for handling non-text content, including the `AddImageTool`.
|
||||||
|
|
||||||
### Working with Images
|
## Working with Images
|
||||||
|
|
||||||
The multimodal agent comes pre-configured with the `AddImageTool`, which allows it to process images. You don't need to manually add this tool - it's automatically included when you enable multimodal capabilities.
|
The multimodal agent comes pre-configured with the `AddImageTool`, which allows it to process images. You don't need to manually add this tool - it's automatically included when you enable multimodal capabilities.
|
||||||
|
|
||||||
@@ -45,7 +45,6 @@ image_analyst = Agent(
|
|||||||
# Create a task for image analysis
|
# Create a task for image analysis
|
||||||
task = Task(
|
task = Task(
|
||||||
description="Analyze the product image at https://example.com/product.jpg and provide a detailed description",
|
description="Analyze the product image at https://example.com/product.jpg and provide a detailed description",
|
||||||
expected_output="A detailed description of the product image",
|
|
||||||
agent=image_analyst
|
agent=image_analyst
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -82,7 +81,6 @@ inspection_task = Task(
|
|||||||
3. Compliance with standards
|
3. Compliance with standards
|
||||||
Provide a detailed report highlighting any issues found.
|
Provide a detailed report highlighting any issues found.
|
||||||
""",
|
""",
|
||||||
expected_output="A detailed report highlighting any issues found",
|
|
||||||
agent=expert_analyst
|
agent=expert_analyst
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -110,7 +108,7 @@ The multimodal agent will automatically handle the image processing through its
|
|||||||
- Process image content with optional context or specific questions
|
- Process image content with optional context or specific questions
|
||||||
- Provide analysis and insights based on the visual information and task requirements
|
- Provide analysis and insights based on the visual information and task requirements
|
||||||
|
|
||||||
### Best Practices
|
## Best Practices
|
||||||
|
|
||||||
When working with multimodal agents, keep these best practices in mind:
|
When working with multimodal agents, keep these best practices in mind:
|
||||||
|
|
||||||
|
|||||||
211
docs/how-to/portkey-observability-and-guardrails.mdx
Normal file
211
docs/how-to/portkey-observability-and-guardrails.mdx
Normal file
@@ -0,0 +1,211 @@
|
|||||||
|
# Portkey Integration with CrewAI
|
||||||
|
<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-CrewAI.png" alt="Portkey CrewAI Header Image" width="70%" />
|
||||||
|
|
||||||
|
|
||||||
|
[Portkey](https://portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) is a 2-line upgrade to make your CrewAI agents reliable, cost-efficient, and fast.
|
||||||
|
|
||||||
|
Portkey adds 4 core production capabilities to any CrewAI agent:
|
||||||
|
1. Routing to **200+ LLMs**
|
||||||
|
2. Making each LLM call more robust
|
||||||
|
3. Full-stack tracing & cost, performance analytics
|
||||||
|
4. Real-time guardrails to enforce behavior
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
1. **Install Required Packages:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install -qU crewai portkey-ai
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Configure the LLM Client:**
|
||||||
|
|
||||||
|
To build CrewAI Agents with Portkey, you'll need two keys:
|
||||||
|
- **Portkey API Key**: Sign up on the [Portkey app](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) and copy your API key
|
||||||
|
- **Virtual Key**: Virtual Keys securely manage your LLM API keys in one place. Store your LLM provider API keys securely in Portkey's vault
|
||||||
|
|
||||||
|
```python
|
||||||
|
from crewai import LLM
|
||||||
|
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
|
||||||
|
|
||||||
|
gpt_llm = LLM(
|
||||||
|
model="gpt-4",
|
||||||
|
base_url=PORTKEY_GATEWAY_URL,
|
||||||
|
api_key="dummy", # We are using Virtual key
|
||||||
|
extra_headers=createHeaders(
|
||||||
|
api_key="YOUR_PORTKEY_API_KEY",
|
||||||
|
virtual_key="YOUR_VIRTUAL_KEY", # Enter your Virtual key from Portkey
|
||||||
|
)
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Create and Run Your First Agent:**
|
||||||
|
|
||||||
|
```python
|
||||||
|
from crewai import Agent, Task, Crew
|
||||||
|
|
||||||
|
# Define your agents with roles and goals
|
||||||
|
coder = Agent(
|
||||||
|
role='Software developer',
|
||||||
|
goal='Write clear, concise code on demand',
|
||||||
|
backstory='An expert coder with a keen eye for software trends.',
|
||||||
|
llm=gpt_llm
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create tasks for your agents
|
||||||
|
task1 = Task(
|
||||||
|
description="Define the HTML for making a simple website with heading- Hello World! Portkey is working!",
|
||||||
|
expected_output="A clear and concise HTML code",
|
||||||
|
agent=coder
|
||||||
|
)
|
||||||
|
|
||||||
|
# Instantiate your crew
|
||||||
|
crew = Crew(
|
||||||
|
agents=[coder],
|
||||||
|
tasks=[task1],
|
||||||
|
)
|
||||||
|
|
||||||
|
result = crew.kickoff()
|
||||||
|
print(result)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Key Features
|
||||||
|
|
||||||
|
| Feature | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| 🌐 Multi-LLM Support | Access OpenAI, Anthropic, Gemini, Azure, and 250+ providers through a unified interface |
|
||||||
|
| 🛡️ Production Reliability | Implement retries, timeouts, load balancing, and fallbacks |
|
||||||
|
| 📊 Advanced Observability | Track 40+ metrics including costs, tokens, latency, and custom metadata |
|
||||||
|
| 🔍 Comprehensive Logging | Debug with detailed execution traces and function call logs |
|
||||||
|
| 🚧 Security Controls | Set budget limits and implement role-based access control |
|
||||||
|
| 🔄 Performance Analytics | Capture and analyze feedback for continuous improvement |
|
||||||
|
| 💾 Intelligent Caching | Reduce costs and latency with semantic or simple caching |
|
||||||
|
|
||||||
|
|
||||||
|
## Production Features with Portkey Configs
|
||||||
|
|
||||||
|
All features mentioned below are through Portkey's Config system. Portkey's Config system allows you to define routing strategies using simple JSON objects in your LLM API calls. You can create and manage Configs directly in your code or through the Portkey Dashboard. Each Config has a unique ID for easy reference.
|
||||||
|
|
||||||
|
<Frame>
|
||||||
|
<img src="https://raw.githubusercontent.com/Portkey-AI/docs-core/refs/heads/main/images/libraries/libraries-3.avif"/>
|
||||||
|
</Frame>
|
||||||
|
|
||||||
|
|
||||||
|
### 1. Use 250+ LLMs
|
||||||
|
Access various LLMs like Anthropic, Gemini, Mistral, Azure OpenAI, and more with minimal code changes. Switch between providers or use them together seamlessly. [Learn more about Universal API](https://portkey.ai/docs/product/ai-gateway/universal-api)
|
||||||
|
|
||||||
|
|
||||||
|
Easily switch between different LLM providers:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Anthropic Configuration
|
||||||
|
anthropic_llm = LLM(
|
||||||
|
model="claude-3-5-sonnet-latest",
|
||||||
|
base_url=PORTKEY_GATEWAY_URL,
|
||||||
|
api_key="dummy",
|
||||||
|
extra_headers=createHeaders(
|
||||||
|
api_key="YOUR_PORTKEY_API_KEY",
|
||||||
|
virtual_key="YOUR_ANTHROPIC_VIRTUAL_KEY", #You don't need provider when using Virtual keys
|
||||||
|
trace_id="anthropic_agent"
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Azure OpenAI Configuration
|
||||||
|
azure_llm = LLM(
|
||||||
|
model="gpt-4",
|
||||||
|
base_url=PORTKEY_GATEWAY_URL,
|
||||||
|
api_key="dummy",
|
||||||
|
extra_headers=createHeaders(
|
||||||
|
api_key="YOUR_PORTKEY_API_KEY",
|
||||||
|
virtual_key="YOUR_AZURE_VIRTUAL_KEY", #You don't need provider when using Virtual keys
|
||||||
|
trace_id="azure_agent"
|
||||||
|
)
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### 2. Caching
|
||||||
|
Improve response times and reduce costs with two powerful caching modes:
|
||||||
|
- **Simple Cache**: Perfect for exact matches
|
||||||
|
- **Semantic Cache**: Matches responses for requests that are semantically similar
|
||||||
|
[Learn more about Caching](https://portkey.ai/docs/product/ai-gateway/cache-simple-and-semantic)
|
||||||
|
|
||||||
|
```py
|
||||||
|
config = {
|
||||||
|
"cache": {
|
||||||
|
"mode": "semantic", # or "simple" for exact matching
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Production Reliability
|
||||||
|
Portkey provides comprehensive reliability features:
|
||||||
|
- **Automatic Retries**: Handle temporary failures gracefully
|
||||||
|
- **Request Timeouts**: Prevent hanging operations
|
||||||
|
- **Conditional Routing**: Route requests based on specific conditions
|
||||||
|
- **Fallbacks**: Set up automatic provider failovers
|
||||||
|
- **Load Balancing**: Distribute requests efficiently
|
||||||
|
|
||||||
|
[Learn more about Reliability Features](https://portkey.ai/docs/product/ai-gateway/)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 4. Metrics
|
||||||
|
|
||||||
|
Agent runs are complex. Portkey automatically logs **40+ comprehensive metrics** for your AI agents, including cost, tokens used, latency, etc. Whether you need a broad overview or granular insights into your agent runs, Portkey's customizable filters provide the metrics you need.
|
||||||
|
|
||||||
|
|
||||||
|
- Cost per agent interaction
|
||||||
|
- Response times and latency
|
||||||
|
- Token usage and efficiency
|
||||||
|
- Success/failure rates
|
||||||
|
- Cache hit rates
|
||||||
|
|
||||||
|
<img src="https://github.com/siddharthsambharia-portkey/Portkey-Product-Images/blob/main/Portkey-Dashboard.png?raw=true" width="70%" alt="Portkey Dashboard" />
|
||||||
|
|
||||||
|
### 5. Detailed Logging
|
||||||
|
Logs are essential for understanding agent behavior, diagnosing issues, and improving performance. They provide a detailed record of agent activities and tool use, which is crucial for debugging and optimizing processes.
|
||||||
|
|
||||||
|
|
||||||
|
Access a dedicated section to view records of agent executions, including parameters, outcomes, function calls, and errors. Filter logs based on multiple parameters such as trace ID, model, tokens used, and metadata.
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><b>Traces</b></summary>
|
||||||
|
<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Traces.png" alt="Portkey Traces" width="70%" />
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><b>Logs</b></summary>
|
||||||
|
<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Logs.png" alt="Portkey Logs" width="70%" />
|
||||||
|
</details>
|
||||||
|
|
||||||
|
### 6. Enterprise Security Features
|
||||||
|
- Set budget limit and rate limts per Virtual Key (disposable API keys)
|
||||||
|
- Implement role-based access control
|
||||||
|
- Track system changes with audit logs
|
||||||
|
- Configure data retention policies
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
For detailed information on creating and managing Configs, visit the [Portkey documentation](https://docs.portkey.ai/product/ai-gateway/configs).
|
||||||
|
|
||||||
|
## Resources
|
||||||
|
|
||||||
|
- [📘 Portkey Documentation](https://docs.portkey.ai)
|
||||||
|
- [📊 Portkey Dashboard](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai)
|
||||||
|
- [🐦 Twitter](https://twitter.com/portkeyai)
|
||||||
|
- [💬 Discord Community](https://discord.gg/DD7vgKK299)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
---
|
---
|
||||||
title: Agent Monitoring with Portkey
|
title: Portkey Observability and Guardrails
|
||||||
description: How to use Portkey with CrewAI
|
description: How to use Portkey with CrewAI
|
||||||
icon: key
|
icon: key
|
||||||
---
|
---
|
||||||
|
|||||||
Binary file not shown.
|
Before Width: | Height: | Size: 16 MiB |
Binary file not shown.
|
Before Width: | Height: | Size: 382 KiB |
@@ -15,48 +15,10 @@ icon: wrench
|
|||||||
If you need to update Python, visit [python.org/downloads](https://python.org/downloads)
|
If you need to update Python, visit [python.org/downloads](https://python.org/downloads)
|
||||||
</Note>
|
</Note>
|
||||||
|
|
||||||
# Setting Up Your Environment
|
|
||||||
|
|
||||||
Before installing CrewAI, it's recommended to set up a virtual environment. This helps isolate your project dependencies and avoid conflicts.
|
|
||||||
|
|
||||||
<Steps>
|
|
||||||
<Step title="Create a Virtual Environment">
|
|
||||||
Choose your preferred method to create a virtual environment:
|
|
||||||
|
|
||||||
**Using venv (Python's built-in tool):**
|
|
||||||
```shell Terminal
|
|
||||||
python3 -m venv .venv
|
|
||||||
```
|
|
||||||
|
|
||||||
**Using conda:**
|
|
||||||
```shell Terminal
|
|
||||||
conda create -n crewai-env python=3.12
|
|
||||||
```
|
|
||||||
</Step>
|
|
||||||
|
|
||||||
<Step title="Activate the Virtual Environment">
|
|
||||||
Activate your virtual environment based on your platform:
|
|
||||||
|
|
||||||
**On macOS/Linux (venv):**
|
|
||||||
```shell Terminal
|
|
||||||
source .venv/bin/activate
|
|
||||||
```
|
|
||||||
|
|
||||||
**On Windows (venv):**
|
|
||||||
```shell Terminal
|
|
||||||
.venv\Scripts\activate
|
|
||||||
```
|
|
||||||
|
|
||||||
**Using conda (all platforms):**
|
|
||||||
```shell Terminal
|
|
||||||
conda activate crewai-env
|
|
||||||
```
|
|
||||||
</Step>
|
|
||||||
</Steps>
|
|
||||||
|
|
||||||
# Installing CrewAI
|
# Installing CrewAI
|
||||||
|
|
||||||
Now let's get you set up! 🚀
|
CrewAI is a flexible and powerful AI framework that enables you to create and manage AI agents, tools, and tasks efficiently.
|
||||||
|
Let's get you set up! 🚀
|
||||||
|
|
||||||
<Steps>
|
<Steps>
|
||||||
<Step title="Install CrewAI">
|
<Step title="Install CrewAI">
|
||||||
@@ -110,9 +72,9 @@ Now let's get you set up! 🚀
|
|||||||
|
|
||||||
# Creating a New Project
|
# Creating a New Project
|
||||||
|
|
||||||
<Tip>
|
<Info>
|
||||||
We recommend using the YAML Template scaffolding for a structured approach to defining agents and tasks.
|
We recommend using the YAML Template scaffolding for a structured approach to defining agents and tasks.
|
||||||
</Tip>
|
</Info>
|
||||||
|
|
||||||
<Steps>
|
<Steps>
|
||||||
<Step title="Generate Project Structure">
|
<Step title="Generate Project Structure">
|
||||||
@@ -142,18 +104,7 @@ Now let's get you set up! 🚀
|
|||||||
└── tasks.yaml
|
└── tasks.yaml
|
||||||
```
|
```
|
||||||
</Frame>
|
</Frame>
|
||||||
</Step>
|
</Step>
|
||||||
|
|
||||||
<Step title="Install Additional Tools">
|
|
||||||
You can install additional tools using UV:
|
|
||||||
```shell Terminal
|
|
||||||
uv add <tool-name>
|
|
||||||
```
|
|
||||||
|
|
||||||
<Tip>
|
|
||||||
UV is our preferred package manager as it's significantly faster than pip and provides better dependency resolution.
|
|
||||||
</Tip>
|
|
||||||
</Step>
|
|
||||||
|
|
||||||
<Step title="Customize Your Project">
|
<Step title="Customize Your Project">
|
||||||
Your project will contain these essential files:
|
Your project will contain these essential files:
|
||||||
|
|||||||
@@ -91,7 +91,6 @@
|
|||||||
"how-to/custom-manager-agent",
|
"how-to/custom-manager-agent",
|
||||||
"how-to/llm-connections",
|
"how-to/llm-connections",
|
||||||
"how-to/customizing-agents",
|
"how-to/customizing-agents",
|
||||||
"how-to/multimodal-agents",
|
|
||||||
"how-to/coding-agents",
|
"how-to/coding-agents",
|
||||||
"how-to/force-tool-output-as-result",
|
"how-to/force-tool-output-as-result",
|
||||||
"how-to/human-input-on-execution",
|
"how-to/human-input-on-execution",
|
||||||
@@ -101,10 +100,8 @@
|
|||||||
"how-to/conditional-tasks",
|
"how-to/conditional-tasks",
|
||||||
"how-to/agentops-observability",
|
"how-to/agentops-observability",
|
||||||
"how-to/langtrace-observability",
|
"how-to/langtrace-observability",
|
||||||
"how-to/mlflow-observability",
|
|
||||||
"how-to/openlit-observability",
|
"how-to/openlit-observability",
|
||||||
"how-to/portkey-observability",
|
"how-to/portkey-observability"
|
||||||
"how-to/langfuse-observability"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ Follow the steps below to get crewing! 🚣♂️
|
|||||||
description: >
|
description: >
|
||||||
Conduct a thorough research about {topic}
|
Conduct a thorough research about {topic}
|
||||||
Make sure you find any interesting and relevant information given
|
Make sure you find any interesting and relevant information given
|
||||||
the current year is 2025.
|
the current year is 2024.
|
||||||
expected_output: >
|
expected_output: >
|
||||||
A list with 10 bullet points of the most relevant information about {topic}
|
A list with 10 bullet points of the most relevant information about {topic}
|
||||||
agent: researcher
|
agent: researcher
|
||||||
@@ -195,10 +195,10 @@ Follow the steps below to get crewing! 🚣♂️
|
|||||||
|
|
||||||
<CodeGroup>
|
<CodeGroup>
|
||||||
```markdown output/report.md
|
```markdown output/report.md
|
||||||
# Comprehensive Report on the Rise and Impact of AI Agents in 2025
|
# Comprehensive Report on the Rise and Impact of AI Agents in 2024
|
||||||
|
|
||||||
## 1. Introduction to AI Agents
|
## 1. Introduction to AI Agents
|
||||||
In 2025, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce.
|
In 2024, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce.
|
||||||
|
|
||||||
## 2. Benefits of AI Agents
|
## 2. Benefits of AI Agents
|
||||||
AI agents bring numerous advantages that are transforming traditional work environments. Key benefits include:
|
AI agents bring numerous advantages that are transforming traditional work environments. Key benefits include:
|
||||||
@@ -252,7 +252,7 @@ Follow the steps below to get crewing! 🚣♂️
|
|||||||
To stay competitive and harness the full potential of AI agents, organizations must remain vigilant about latest developments in AI technology and consider continuous learning and adaptation in their strategic planning.
|
To stay competitive and harness the full potential of AI agents, organizations must remain vigilant about latest developments in AI technology and consider continuous learning and adaptation in their strategic planning.
|
||||||
|
|
||||||
## 8. Conclusion
|
## 8. Conclusion
|
||||||
The emergence of AI agents is undeniably reshaping the workplace landscape in 5. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment.
|
The emergence of AI agents is undeniably reshaping the workplace landscape in 2024. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment.
|
||||||
```
|
```
|
||||||
</CodeGroup>
|
</CodeGroup>
|
||||||
</Step>
|
</Step>
|
||||||
@@ -278,7 +278,7 @@ email_summarizer:
|
|||||||
Summarize emails into a concise and clear summary
|
Summarize emails into a concise and clear summary
|
||||||
backstory: >
|
backstory: >
|
||||||
You will create a 5 bullet point summary of the report
|
You will create a 5 bullet point summary of the report
|
||||||
llm: openai/gpt-4o
|
llm: mixtal_llm
|
||||||
```
|
```
|
||||||
|
|
||||||
<Tip>
|
<Tip>
|
||||||
|
|||||||
@@ -1,118 +1,78 @@
|
|||||||
---
|
---
|
||||||
title: Composio Tool
|
title: Composio Tool
|
||||||
description: Composio provides 250+ production-ready tools for AI agents with flexible authentication management.
|
description: The `ComposioTool` is a wrapper around the composio set of tools and gives your agent access to a wide variety of tools from the Composio SDK.
|
||||||
icon: gear-code
|
icon: gear-code
|
||||||
---
|
---
|
||||||
|
|
||||||
# `ComposioToolSet`
|
# `ComposioTool`
|
||||||
|
|
||||||
## Description
|
## Description
|
||||||
Composio is an integration platform that allows you to connect your AI agents to 250+ tools. Key features include:
|
|
||||||
|
|
||||||
- **Enterprise-Grade Authentication**: Built-in support for OAuth, API Keys, JWT with automatic token refresh
|
This tools is a wrapper around the composio set of tools and gives your agent access to a wide variety of tools from the Composio SDK.
|
||||||
- **Full Observability**: Detailed tool usage logs, execution timestamps, and more
|
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
To incorporate Composio tools into your project, follow the instructions below:
|
To incorporate this tool into your project, follow the installation instructions below:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
pip install composio-crewai
|
pip install composio-core
|
||||||
pip install crewai
|
pip install 'crewai[tools]'
|
||||||
```
|
```
|
||||||
|
|
||||||
After the installation is complete, either run `composio login` or export your composio API key as `COMPOSIO_API_KEY`. Get your Composio API key from [here](https://app.composio.dev)
|
after the installation is complete, either run `composio login` or export your composio API key as `COMPOSIO_API_KEY`.
|
||||||
|
|
||||||
## Example
|
## Example
|
||||||
|
|
||||||
The following example demonstrates how to initialize the tool and execute a github action:
|
The following example demonstrates how to initialize the tool and execute a github action:
|
||||||
|
|
||||||
1. Initialize Composio toolset
|
1. Initialize Composio tools
|
||||||
|
|
||||||
```python Code
|
```python Code
|
||||||
from composio_crewai import ComposioToolSet, App, Action
|
from composio import App
|
||||||
from crewai import Agent, Task, Crew
|
from crewai_tools import ComposioTool
|
||||||
|
from crewai import Agent, Task
|
||||||
|
|
||||||
toolset = ComposioToolSet()
|
|
||||||
|
tools = [ComposioTool.from_action(action=Action.GITHUB_ACTIVITY_STAR_REPO_FOR_AUTHENTICATED_USER)]
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Connect your GitHub account
|
If you don't know what action you want to use, use `from_app` and `tags` filter to get relevant actions
|
||||||
<CodeGroup>
|
|
||||||
```shell CLI
|
|
||||||
composio add github
|
|
||||||
```
|
|
||||||
```python Code
|
```python Code
|
||||||
request = toolset.initiate_connection(app=App.GITHUB)
|
tools = ComposioTool.from_app(App.GITHUB, tags=["important"])
|
||||||
print(f"Open this URL to authenticate: {request.redirectUrl}")
|
|
||||||
```
|
```
|
||||||
</CodeGroup>
|
|
||||||
|
|
||||||
3. Get Tools
|
or use `use_case` to search relevant actions
|
||||||
|
|
||||||
- Retrieving all the tools from an app (not recommended for production):
|
|
||||||
```python Code
|
```python Code
|
||||||
tools = toolset.get_tools(apps=[App.GITHUB])
|
tools = ComposioTool.from_app(App.GITHUB, use_case="Star a github repository")
|
||||||
```
|
```
|
||||||
|
|
||||||
- Filtering tools based on tags:
|
2. Define agent
|
||||||
```python Code
|
|
||||||
tag = "users"
|
|
||||||
|
|
||||||
filtered_action_enums = toolset.find_actions_by_tags(
|
|
||||||
App.GITHUB,
|
|
||||||
tags=[tag],
|
|
||||||
)
|
|
||||||
|
|
||||||
tools = toolset.get_tools(actions=filtered_action_enums)
|
|
||||||
```
|
|
||||||
|
|
||||||
- Filtering tools based on use case:
|
|
||||||
```python Code
|
|
||||||
use_case = "Star a repository on GitHub"
|
|
||||||
|
|
||||||
filtered_action_enums = toolset.find_actions_by_use_case(
|
|
||||||
App.GITHUB, use_case=use_case, advanced=False
|
|
||||||
)
|
|
||||||
|
|
||||||
tools = toolset.get_tools(actions=filtered_action_enums)
|
|
||||||
```
|
|
||||||
<Tip>Set `advanced` to True to get actions for complex use cases</Tip>
|
|
||||||
|
|
||||||
- Using specific tools:
|
|
||||||
|
|
||||||
In this demo, we will use the `GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER` action from the GitHub app.
|
|
||||||
```python Code
|
|
||||||
tools = toolset.get_tools(
|
|
||||||
actions=[Action.GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER]
|
|
||||||
)
|
|
||||||
```
|
|
||||||
Learn more about filtering actions [here](https://docs.composio.dev/patterns/tools/use-tools/use-specific-actions)
|
|
||||||
|
|
||||||
4. Define agent
|
|
||||||
|
|
||||||
```python Code
|
```python Code
|
||||||
crewai_agent = Agent(
|
crewai_agent = Agent(
|
||||||
role="GitHub Agent",
|
role="Github Agent",
|
||||||
goal="You take action on GitHub using GitHub APIs",
|
goal="You take action on Github using Github APIs",
|
||||||
backstory="You are AI agent that is responsible for taking actions on GitHub on behalf of users using GitHub APIs",
|
backstory=(
|
||||||
|
"You are AI agent that is responsible for taking actions on Github "
|
||||||
|
"on users behalf. You need to take action on Github using Github APIs"
|
||||||
|
),
|
||||||
verbose=True,
|
verbose=True,
|
||||||
tools=tools,
|
tools=tools,
|
||||||
llm= # pass an llm
|
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
5. Execute task
|
3. Execute task
|
||||||
|
|
||||||
```python Code
|
```python Code
|
||||||
task = Task(
|
task = Task(
|
||||||
description="Star a repo composiohq/composio on GitHub",
|
description="Star a repo ComposioHQ/composio on GitHub",
|
||||||
agent=crewai_agent,
|
agent=crewai_agent,
|
||||||
expected_output="Status of the operation",
|
expected_output="if the star happened",
|
||||||
)
|
)
|
||||||
|
|
||||||
crew = Crew(agents=[crewai_agent], tasks=[task])
|
task.execute()
|
||||||
|
|
||||||
crew.kickoff()
|
|
||||||
```
|
```
|
||||||
|
|
||||||
* More detailed list of tools can be found [here](https://app.composio.dev)
|
* More detailed list of tools can be found [here](https://app.composio.dev)
|
||||||
@@ -8,9 +8,9 @@ icon: file-pen
|
|||||||
|
|
||||||
## Description
|
## Description
|
||||||
|
|
||||||
The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files with cross-platform compatibility (Windows, Linux, macOS).
|
The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files.
|
||||||
It is particularly useful in scenarios such as generating reports, saving logs, creating configuration files, and more.
|
It is particularly useful in scenarios such as generating reports, saving logs, creating configuration files, and more.
|
||||||
This tool handles path differences across operating systems, supports UTF-8 encoding, and automatically creates directories if they don't exist, making it easier to organize your output reliably across different platforms.
|
This tool supports creating new directories if they don't exist, making it easier to organize your output.
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
@@ -43,8 +43,6 @@ print(result)
|
|||||||
|
|
||||||
## Conclusion
|
## Conclusion
|
||||||
|
|
||||||
By integrating the `FileWriterTool` into your crews, the agents can reliably write content to files across different operating systems.
|
By integrating the `FileWriterTool` into your crews, the agents can execute the process of writing content to files and creating directories.
|
||||||
This tool is essential for tasks that require saving output data, creating structured file systems, and handling cross-platform file operations.
|
This tool is essential for tasks that require saving output data, creating structured file systems, and more. By adhering to the setup and usage guidelines provided,
|
||||||
It's particularly recommended for Windows users who may encounter file writing issues with standard Python file operations.
|
incorporating this tool into projects is straightforward and efficient.
|
||||||
|
|
||||||
By adhering to the setup and usage guidelines provided, incorporating this tool into projects is straightforward and ensures consistent file writing behavior across all platforms.
|
|
||||||
@@ -152,7 +152,6 @@ nav:
|
|||||||
- Agent Monitoring with AgentOps: 'how-to/AgentOps-Observability.md'
|
- Agent Monitoring with AgentOps: 'how-to/AgentOps-Observability.md'
|
||||||
- Agent Monitoring with LangTrace: 'how-to/Langtrace-Observability.md'
|
- Agent Monitoring with LangTrace: 'how-to/Langtrace-Observability.md'
|
||||||
- Agent Monitoring with OpenLIT: 'how-to/openlit-Observability.md'
|
- Agent Monitoring with OpenLIT: 'how-to/openlit-Observability.md'
|
||||||
- Agent Monitoring with MLflow: 'how-to/mlflow-Observability.md'
|
|
||||||
- Tools Docs:
|
- Tools Docs:
|
||||||
- Browserbase Web Loader: 'tools/BrowserbaseLoadTool.md'
|
- Browserbase Web Loader: 'tools/BrowserbaseLoadTool.md'
|
||||||
- Code Docs RAG Search: 'tools/CodeDocsSearchTool.md'
|
- Code Docs RAG Search: 'tools/CodeDocsSearchTool.md'
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[project]
|
[project]
|
||||||
name = "crewai"
|
name = "crewai"
|
||||||
version = "0.102.0"
|
version = "0.95.0"
|
||||||
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
|
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
requires-python = ">=3.10,<3.13"
|
requires-python = ">=3.10,<3.13"
|
||||||
@@ -11,22 +11,27 @@ dependencies = [
|
|||||||
# Core Dependencies
|
# Core Dependencies
|
||||||
"pydantic>=2.4.2",
|
"pydantic>=2.4.2",
|
||||||
"openai>=1.13.3",
|
"openai>=1.13.3",
|
||||||
"litellm==1.60.2",
|
"litellm==1.57.4",
|
||||||
"instructor>=1.3.3",
|
"instructor>=1.3.3",
|
||||||
|
|
||||||
# Text Processing
|
# Text Processing
|
||||||
"pdfplumber>=0.11.4",
|
"pdfplumber>=0.11.4",
|
||||||
"regex>=2024.9.11",
|
"regex>=2024.9.11",
|
||||||
|
|
||||||
# Telemetry and Monitoring
|
# Telemetry and Monitoring
|
||||||
"opentelemetry-api>=1.22.0",
|
"opentelemetry-api>=1.22.0",
|
||||||
"opentelemetry-sdk>=1.22.0",
|
"opentelemetry-sdk>=1.22.0",
|
||||||
"opentelemetry-exporter-otlp-proto-http>=1.22.0",
|
"opentelemetry-exporter-otlp-proto-http>=1.22.0",
|
||||||
|
|
||||||
# Data Handling
|
# Data Handling
|
||||||
"chromadb>=0.5.23",
|
"chromadb>=0.5.23",
|
||||||
"openpyxl>=3.1.5",
|
"openpyxl>=3.1.5",
|
||||||
"pyvis>=0.3.2",
|
"pyvis>=0.3.2",
|
||||||
|
|
||||||
# Authentication and Security
|
# Authentication and Security
|
||||||
"auth0-python>=4.7.1",
|
"auth0-python>=4.7.1",
|
||||||
"python-dotenv>=1.0.0",
|
"python-dotenv>=1.0.0",
|
||||||
|
|
||||||
# Configuration and Utils
|
# Configuration and Utils
|
||||||
"click>=8.1.7",
|
"click>=8.1.7",
|
||||||
"appdirs>=1.4.4",
|
"appdirs>=1.4.4",
|
||||||
@@ -35,8 +40,7 @@ dependencies = [
|
|||||||
"uv>=0.4.25",
|
"uv>=0.4.25",
|
||||||
"tomli-w>=1.1.0",
|
"tomli-w>=1.1.0",
|
||||||
"tomli>=2.0.2",
|
"tomli>=2.0.2",
|
||||||
"blinker>=1.9.0",
|
"blinker>=1.9.0"
|
||||||
"json5>=0.10.0",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[project.urls]
|
[project.urls]
|
||||||
@@ -45,7 +49,7 @@ Documentation = "https://docs.crewai.com"
|
|||||||
Repository = "https://github.com/crewAIInc/crewAI"
|
Repository = "https://github.com/crewAIInc/crewAI"
|
||||||
|
|
||||||
[project.optional-dependencies]
|
[project.optional-dependencies]
|
||||||
tools = ["crewai-tools>=0.36.0"]
|
tools = ["crewai-tools>=0.25.5"]
|
||||||
embeddings = [
|
embeddings = [
|
||||||
"tiktoken~=0.7.0"
|
"tiktoken~=0.7.0"
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ warnings.filterwarnings(
|
|||||||
category=UserWarning,
|
category=UserWarning,
|
||||||
module="pydantic.main",
|
module="pydantic.main",
|
||||||
)
|
)
|
||||||
__version__ = "0.102.0"
|
__version__ = "0.95.0"
|
||||||
__all__ = [
|
__all__ = [
|
||||||
"Agent",
|
"Agent",
|
||||||
"Crew",
|
"Crew",
|
||||||
|
|||||||
@@ -1,13 +1,14 @@
|
|||||||
import re
|
import os
|
||||||
import shutil
|
import shutil
|
||||||
import subprocess
|
import subprocess
|
||||||
from typing import Any, Dict, List, Literal, Optional, Sequence, Union
|
from typing import Any, Dict, List, Literal, Optional, Union
|
||||||
|
|
||||||
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
|
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
|
||||||
|
|
||||||
from crewai.agents import CacheHandler
|
from crewai.agents import CacheHandler
|
||||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||||
|
from crewai.cli.constants import ENV_VARS, LITELLM_PARAMS
|
||||||
from crewai.knowledge.knowledge import Knowledge
|
from crewai.knowledge.knowledge import Knowledge
|
||||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||||
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
|
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
|
||||||
@@ -16,20 +17,29 @@ from crewai.memory.contextual.contextual_memory import ContextualMemory
|
|||||||
from crewai.task import Task
|
from crewai.task import Task
|
||||||
from crewai.tools import BaseTool
|
from crewai.tools import BaseTool
|
||||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||||
|
from crewai.tools.base_tool import Tool
|
||||||
from crewai.utilities import Converter, Prompts
|
from crewai.utilities import Converter, Prompts
|
||||||
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
|
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
|
||||||
from crewai.utilities.converter import generate_model_description
|
from crewai.utilities.converter import generate_model_description
|
||||||
from crewai.utilities.events.agent_events import (
|
|
||||||
AgentExecutionCompletedEvent,
|
|
||||||
AgentExecutionErrorEvent,
|
|
||||||
AgentExecutionStartedEvent,
|
|
||||||
)
|
|
||||||
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
|
||||||
from crewai.utilities.llm_utils import create_llm
|
from crewai.utilities.llm_utils import create_llm
|
||||||
from crewai.utilities.token_counter_callback import TokenCalcHandler
|
from crewai.utilities.token_counter_callback import TokenCalcHandler
|
||||||
from crewai.utilities.training_handler import CrewTrainingHandler
|
from crewai.utilities.training_handler import CrewTrainingHandler
|
||||||
|
|
||||||
|
agentops = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
import agentops # type: ignore # Name "agentops" is already defined
|
||||||
|
from agentops import track_agent # type: ignore
|
||||||
|
except ImportError:
|
||||||
|
|
||||||
|
def track_agent():
|
||||||
|
def noop(f):
|
||||||
|
return f
|
||||||
|
|
||||||
|
return noop
|
||||||
|
|
||||||
|
|
||||||
|
@track_agent()
|
||||||
class Agent(BaseAgent):
|
class Agent(BaseAgent):
|
||||||
"""Represents an agent in a system.
|
"""Represents an agent in a system.
|
||||||
|
|
||||||
@@ -46,13 +56,13 @@ class Agent(BaseAgent):
|
|||||||
llm: The language model that will run the agent.
|
llm: The language model that will run the agent.
|
||||||
function_calling_llm: The language model that will handle the tool calling for this agent, it overrides the crew function_calling_llm.
|
function_calling_llm: The language model that will handle the tool calling for this agent, it overrides the crew function_calling_llm.
|
||||||
max_iter: Maximum number of iterations for an agent to execute a task.
|
max_iter: Maximum number of iterations for an agent to execute a task.
|
||||||
|
memory: Whether the agent should have memory or not.
|
||||||
max_rpm: Maximum number of requests per minute for the agent execution to be respected.
|
max_rpm: Maximum number of requests per minute for the agent execution to be respected.
|
||||||
verbose: Whether the agent execution should be in verbose mode.
|
verbose: Whether the agent execution should be in verbose mode.
|
||||||
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
|
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
|
||||||
tools: Tools at agents disposal
|
tools: Tools at agents disposal
|
||||||
step_callback: Callback to be executed after each step of the agent execution.
|
step_callback: Callback to be executed after each step of the agent execution.
|
||||||
knowledge_sources: Knowledge sources for the agent.
|
knowledge_sources: Knowledge sources for the agent.
|
||||||
embedder: Embedder configuration for the agent.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
_times_executed: int = PrivateAttr(default=0)
|
_times_executed: int = PrivateAttr(default=0)
|
||||||
@@ -62,6 +72,9 @@ class Agent(BaseAgent):
|
|||||||
)
|
)
|
||||||
agent_ops_agent_name: str = None # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
|
agent_ops_agent_name: str = None # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
|
||||||
agent_ops_agent_id: str = None # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
|
agent_ops_agent_id: str = None # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
|
||||||
|
cache_handler: InstanceOf[CacheHandler] = Field(
|
||||||
|
default=None, description="An instance of the CacheHandler class."
|
||||||
|
)
|
||||||
step_callback: Optional[Any] = Field(
|
step_callback: Optional[Any] = Field(
|
||||||
default=None,
|
default=None,
|
||||||
description="Callback to be executed after each step of the agent execution.",
|
description="Callback to be executed after each step of the agent execution.",
|
||||||
@@ -95,6 +108,10 @@ class Agent(BaseAgent):
|
|||||||
default=True,
|
default=True,
|
||||||
description="Keep messages under the context window size by summarizing content.",
|
description="Keep messages under the context window size by summarizing content.",
|
||||||
)
|
)
|
||||||
|
max_iter: int = Field(
|
||||||
|
default=20,
|
||||||
|
description="Maximum number of iterations for an agent to execute a task before giving it's best answer",
|
||||||
|
)
|
||||||
max_retry_limit: int = Field(
|
max_retry_limit: int = Field(
|
||||||
default=2,
|
default=2,
|
||||||
description="Maximum number of retries for an agent to execute a task when an error occurs.",
|
description="Maximum number of retries for an agent to execute a task when an error occurs.",
|
||||||
@@ -107,13 +124,21 @@ class Agent(BaseAgent):
|
|||||||
default="safe",
|
default="safe",
|
||||||
description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
|
description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
|
||||||
)
|
)
|
||||||
embedder: Optional[Dict[str, Any]] = Field(
|
embedder_config: Optional[Dict[str, Any]] = Field(
|
||||||
default=None,
|
default=None,
|
||||||
description="Embedder configuration for the agent.",
|
description="Embedder configuration for the agent.",
|
||||||
)
|
)
|
||||||
|
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
|
||||||
|
default=None,
|
||||||
|
description="Knowledge sources for the agent.",
|
||||||
|
)
|
||||||
|
_knowledge: Optional[Knowledge] = PrivateAttr(
|
||||||
|
default=None,
|
||||||
|
)
|
||||||
|
|
||||||
@model_validator(mode="after")
|
@model_validator(mode="after")
|
||||||
def post_init_setup(self):
|
def post_init_setup(self):
|
||||||
|
self._set_knowledge()
|
||||||
self.agent_ops_agent_name = self.role
|
self.agent_ops_agent_name = self.role
|
||||||
|
|
||||||
self.llm = create_llm(self.llm)
|
self.llm = create_llm(self.llm)
|
||||||
@@ -133,22 +158,17 @@ class Agent(BaseAgent):
|
|||||||
self.cache_handler = CacheHandler()
|
self.cache_handler = CacheHandler()
|
||||||
self.set_cache_handler(self.cache_handler)
|
self.set_cache_handler(self.cache_handler)
|
||||||
|
|
||||||
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
|
def _set_knowledge(self):
|
||||||
try:
|
try:
|
||||||
if self.embedder is None and crew_embedder:
|
|
||||||
self.embedder = crew_embedder
|
|
||||||
|
|
||||||
if self.knowledge_sources:
|
if self.knowledge_sources:
|
||||||
full_pattern = re.compile(r"[^a-zA-Z0-9\-_\r\n]|(\.\.)")
|
knowledge_agent_name = f"{self.role.replace(' ', '_')}"
|
||||||
knowledge_agent_name = f"{re.sub(full_pattern, '_', self.role)}"
|
|
||||||
if isinstance(self.knowledge_sources, list) and all(
|
if isinstance(self.knowledge_sources, list) and all(
|
||||||
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
|
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
|
||||||
):
|
):
|
||||||
self.knowledge = Knowledge(
|
self._knowledge = Knowledge(
|
||||||
sources=self.knowledge_sources,
|
sources=self.knowledge_sources,
|
||||||
embedder=self.embedder,
|
embedder_config=self.embedder_config,
|
||||||
collection_name=knowledge_agent_name,
|
collection_name=knowledge_agent_name,
|
||||||
storage=self.knowledge_storage or None,
|
|
||||||
)
|
)
|
||||||
except (TypeError, ValueError) as e:
|
except (TypeError, ValueError) as e:
|
||||||
raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
|
raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
|
||||||
@@ -182,15 +202,13 @@ class Agent(BaseAgent):
|
|||||||
if task.output_json:
|
if task.output_json:
|
||||||
# schema = json.dumps(task.output_json, indent=2)
|
# schema = json.dumps(task.output_json, indent=2)
|
||||||
schema = generate_model_description(task.output_json)
|
schema = generate_model_description(task.output_json)
|
||||||
task_prompt += "\n" + self.i18n.slice(
|
|
||||||
"formatted_task_instructions"
|
|
||||||
).format(output_format=schema)
|
|
||||||
|
|
||||||
elif task.output_pydantic:
|
elif task.output_pydantic:
|
||||||
schema = generate_model_description(task.output_pydantic)
|
schema = generate_model_description(task.output_pydantic)
|
||||||
task_prompt += "\n" + self.i18n.slice(
|
|
||||||
"formatted_task_instructions"
|
task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
|
||||||
).format(output_format=schema)
|
output_format=schema
|
||||||
|
)
|
||||||
|
|
||||||
if context:
|
if context:
|
||||||
task_prompt = self.i18n.slice("task_with_context").format(
|
task_prompt = self.i18n.slice("task_with_context").format(
|
||||||
@@ -209,8 +227,8 @@ class Agent(BaseAgent):
|
|||||||
if memory.strip() != "":
|
if memory.strip() != "":
|
||||||
task_prompt += self.i18n.slice("memory").format(memory=memory)
|
task_prompt += self.i18n.slice("memory").format(memory=memory)
|
||||||
|
|
||||||
if self.knowledge:
|
if self._knowledge:
|
||||||
agent_knowledge_snippets = self.knowledge.query([task.prompt()])
|
agent_knowledge_snippets = self._knowledge.query([task.prompt()])
|
||||||
if agent_knowledge_snippets:
|
if agent_knowledge_snippets:
|
||||||
agent_knowledge_context = extract_knowledge_context(
|
agent_knowledge_context = extract_knowledge_context(
|
||||||
agent_knowledge_snippets
|
agent_knowledge_snippets
|
||||||
@@ -234,15 +252,6 @@ class Agent(BaseAgent):
|
|||||||
task_prompt = self._use_trained_data(task_prompt=task_prompt)
|
task_prompt = self._use_trained_data(task_prompt=task_prompt)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
event=AgentExecutionStartedEvent(
|
|
||||||
agent=self,
|
|
||||||
tools=self.tools,
|
|
||||||
task_prompt=task_prompt,
|
|
||||||
task=task,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
result = self.agent_executor.invoke(
|
result = self.agent_executor.invoke(
|
||||||
{
|
{
|
||||||
"input": task_prompt,
|
"input": task_prompt,
|
||||||
@@ -252,27 +261,8 @@ class Agent(BaseAgent):
|
|||||||
}
|
}
|
||||||
)["output"]
|
)["output"]
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if e.__class__.__module__.startswith("litellm"):
|
|
||||||
# Do not retry on litellm errors
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
event=AgentExecutionErrorEvent(
|
|
||||||
agent=self,
|
|
||||||
task=task,
|
|
||||||
error=str(e),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
raise e
|
|
||||||
self._times_executed += 1
|
self._times_executed += 1
|
||||||
if self._times_executed > self.max_retry_limit:
|
if self._times_executed > self.max_retry_limit:
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
event=AgentExecutionErrorEvent(
|
|
||||||
agent=self,
|
|
||||||
task=task,
|
|
||||||
error=str(e),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
raise e
|
raise e
|
||||||
result = self.execute_task(task, context, tools)
|
result = self.execute_task(task, context, tools)
|
||||||
|
|
||||||
@@ -285,10 +275,7 @@ class Agent(BaseAgent):
|
|||||||
for tool_result in self.tools_results: # type: ignore # Item "None" of "list[Any] | None" has no attribute "__iter__" (not iterable)
|
for tool_result in self.tools_results: # type: ignore # Item "None" of "list[Any] | None" has no attribute "__iter__" (not iterable)
|
||||||
if tool_result.get("result_as_answer", False):
|
if tool_result.get("result_as_answer", False):
|
||||||
result = tool_result["result"]
|
result = tool_result["result"]
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
event=AgentExecutionCompletedEvent(agent=self, task=task, output=result),
|
|
||||||
)
|
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def create_agent_executor(
|
def create_agent_executor(
|
||||||
@@ -346,14 +333,14 @@ class Agent(BaseAgent):
|
|||||||
tools = agent_tools.tools()
|
tools = agent_tools.tools()
|
||||||
return tools
|
return tools
|
||||||
|
|
||||||
def get_multimodal_tools(self) -> Sequence[BaseTool]:
|
def get_multimodal_tools(self) -> List[Tool]:
|
||||||
from crewai.tools.agent_tools.add_image_tool import AddImageTool
|
from crewai.tools.agent_tools.add_image_tool import AddImageTool
|
||||||
|
|
||||||
return [AddImageTool()]
|
return [AddImageTool()]
|
||||||
|
|
||||||
def get_code_execution_tools(self):
|
def get_code_execution_tools(self):
|
||||||
try:
|
try:
|
||||||
from crewai_tools import CodeInterpreterTool # type: ignore
|
from crewai_tools import CodeInterpreterTool
|
||||||
|
|
||||||
# Set the unsafe_mode based on the code_execution_mode attribute
|
# Set the unsafe_mode based on the code_execution_mode attribute
|
||||||
unsafe_mode = self.code_execution_mode == "unsafe"
|
unsafe_mode = self.code_execution_mode == "unsafe"
|
||||||
|
|||||||
@@ -18,12 +18,10 @@ from pydantic_core import PydanticCustomError
|
|||||||
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
|
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
|
||||||
from crewai.agents.cache.cache_handler import CacheHandler
|
from crewai.agents.cache.cache_handler import CacheHandler
|
||||||
from crewai.agents.tools_handler import ToolsHandler
|
from crewai.agents.tools_handler import ToolsHandler
|
||||||
from crewai.knowledge.knowledge import Knowledge
|
from crewai.tools import BaseTool
|
||||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
from crewai.tools.base_tool import Tool
|
||||||
from crewai.tools.base_tool import BaseTool, Tool
|
|
||||||
from crewai.utilities import I18N, Logger, RPMController
|
from crewai.utilities import I18N, Logger, RPMController
|
||||||
from crewai.utilities.config import process_config
|
from crewai.utilities.config import process_config
|
||||||
from crewai.utilities.converter import Converter
|
|
||||||
|
|
||||||
T = TypeVar("T", bound="BaseAgent")
|
T = TypeVar("T", bound="BaseAgent")
|
||||||
|
|
||||||
@@ -42,7 +40,7 @@ class BaseAgent(ABC, BaseModel):
|
|||||||
max_rpm (Optional[int]): Maximum number of requests per minute for the agent execution.
|
max_rpm (Optional[int]): Maximum number of requests per minute for the agent execution.
|
||||||
allow_delegation (bool): Allow delegation of tasks to agents.
|
allow_delegation (bool): Allow delegation of tasks to agents.
|
||||||
tools (Optional[List[Any]]): Tools at the agent's disposal.
|
tools (Optional[List[Any]]): Tools at the agent's disposal.
|
||||||
max_iter (int): Maximum iterations for an agent to execute a task.
|
max_iter (Optional[int]): Maximum iterations for an agent to execute a task.
|
||||||
agent_executor (InstanceOf): An instance of the CrewAgentExecutor class.
|
agent_executor (InstanceOf): An instance of the CrewAgentExecutor class.
|
||||||
llm (Any): Language model that will run the agent.
|
llm (Any): Language model that will run the agent.
|
||||||
crew (Any): Crew to which the agent belongs.
|
crew (Any): Crew to which the agent belongs.
|
||||||
@@ -50,8 +48,6 @@ class BaseAgent(ABC, BaseModel):
|
|||||||
cache_handler (InstanceOf[CacheHandler]): An instance of the CacheHandler class.
|
cache_handler (InstanceOf[CacheHandler]): An instance of the CacheHandler class.
|
||||||
tools_handler (InstanceOf[ToolsHandler]): An instance of the ToolsHandler class.
|
tools_handler (InstanceOf[ToolsHandler]): An instance of the ToolsHandler class.
|
||||||
max_tokens: Maximum number of tokens for the agent to generate in a response.
|
max_tokens: Maximum number of tokens for the agent to generate in a response.
|
||||||
knowledge_sources: Knowledge sources for the agent.
|
|
||||||
knowledge_storage: Custom knowledge storage for the agent.
|
|
||||||
|
|
||||||
|
|
||||||
Methods:
|
Methods:
|
||||||
@@ -111,10 +107,10 @@ class BaseAgent(ABC, BaseModel):
|
|||||||
default=False,
|
default=False,
|
||||||
description="Enable agent to delegate and ask questions among each other.",
|
description="Enable agent to delegate and ask questions among each other.",
|
||||||
)
|
)
|
||||||
tools: Optional[List[BaseTool]] = Field(
|
tools: Optional[List[Any]] = Field(
|
||||||
default_factory=list, description="Tools at agents' disposal"
|
default_factory=list, description="Tools at agents' disposal"
|
||||||
)
|
)
|
||||||
max_iter: int = Field(
|
max_iter: Optional[int] = Field(
|
||||||
default=25, description="Maximum iterations for an agent to execute a task"
|
default=25, description="Maximum iterations for an agent to execute a task"
|
||||||
)
|
)
|
||||||
agent_executor: InstanceOf = Field(
|
agent_executor: InstanceOf = Field(
|
||||||
@@ -125,27 +121,15 @@ class BaseAgent(ABC, BaseModel):
|
|||||||
)
|
)
|
||||||
crew: Any = Field(default=None, description="Crew to which the agent belongs.")
|
crew: Any = Field(default=None, description="Crew to which the agent belongs.")
|
||||||
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
|
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
|
||||||
cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
|
cache_handler: InstanceOf[CacheHandler] = Field(
|
||||||
default=None, description="An instance of the CacheHandler class."
|
default=None, description="An instance of the CacheHandler class."
|
||||||
)
|
)
|
||||||
tools_handler: InstanceOf[ToolsHandler] = Field(
|
tools_handler: InstanceOf[ToolsHandler] = Field(
|
||||||
default_factory=ToolsHandler,
|
default=None, description="An instance of the ToolsHandler class."
|
||||||
description="An instance of the ToolsHandler class.",
|
|
||||||
)
|
)
|
||||||
max_tokens: Optional[int] = Field(
|
max_tokens: Optional[int] = Field(
|
||||||
default=None, description="Maximum number of tokens for the agent's execution."
|
default=None, description="Maximum number of tokens for the agent's execution."
|
||||||
)
|
)
|
||||||
knowledge: Optional[Knowledge] = Field(
|
|
||||||
default=None, description="Knowledge for the agent."
|
|
||||||
)
|
|
||||||
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
|
|
||||||
default=None,
|
|
||||||
description="Knowledge sources for the agent.",
|
|
||||||
)
|
|
||||||
knowledge_storage: Optional[Any] = Field(
|
|
||||||
default=None,
|
|
||||||
description="Custom knowledge storage for the agent.",
|
|
||||||
)
|
|
||||||
|
|
||||||
@model_validator(mode="before")
|
@model_validator(mode="before")
|
||||||
@classmethod
|
@classmethod
|
||||||
@@ -255,7 +239,7 @@ class BaseAgent(ABC, BaseModel):
|
|||||||
@abstractmethod
|
@abstractmethod
|
||||||
def get_output_converter(
|
def get_output_converter(
|
||||||
self, llm: Any, text: str, model: type[BaseModel] | None, instructions: str
|
self, llm: Any, text: str, model: type[BaseModel] | None, instructions: str
|
||||||
) -> Converter:
|
):
|
||||||
"""Get the converter class for the agent to create json/pydantic outputs."""
|
"""Get the converter class for the agent to create json/pydantic outputs."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@@ -272,44 +256,13 @@ class BaseAgent(ABC, BaseModel):
|
|||||||
"tools_handler",
|
"tools_handler",
|
||||||
"cache_handler",
|
"cache_handler",
|
||||||
"llm",
|
"llm",
|
||||||
"knowledge_sources",
|
|
||||||
"knowledge_storage",
|
|
||||||
"knowledge",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Copy llm
|
# Copy llm and clear callbacks
|
||||||
existing_llm = shallow_copy(self.llm)
|
existing_llm = shallow_copy(self.llm)
|
||||||
copied_knowledge = shallow_copy(self.knowledge)
|
|
||||||
copied_knowledge_storage = shallow_copy(self.knowledge_storage)
|
|
||||||
# Properly copy knowledge sources if they exist
|
|
||||||
existing_knowledge_sources = None
|
|
||||||
if self.knowledge_sources:
|
|
||||||
# Create a shared storage instance for all knowledge sources
|
|
||||||
shared_storage = (
|
|
||||||
self.knowledge_sources[0].storage if self.knowledge_sources else None
|
|
||||||
)
|
|
||||||
|
|
||||||
existing_knowledge_sources = []
|
|
||||||
for source in self.knowledge_sources:
|
|
||||||
copied_source = (
|
|
||||||
source.model_copy()
|
|
||||||
if hasattr(source, "model_copy")
|
|
||||||
else shallow_copy(source)
|
|
||||||
)
|
|
||||||
# Ensure all copied sources use the same storage instance
|
|
||||||
copied_source.storage = shared_storage
|
|
||||||
existing_knowledge_sources.append(copied_source)
|
|
||||||
|
|
||||||
copied_data = self.model_dump(exclude=exclude)
|
copied_data = self.model_dump(exclude=exclude)
|
||||||
copied_data = {k: v for k, v in copied_data.items() if v is not None}
|
copied_data = {k: v for k, v in copied_data.items() if v is not None}
|
||||||
copied_agent = type(self)(
|
copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools)
|
||||||
**copied_data,
|
|
||||||
llm=existing_llm,
|
|
||||||
tools=self.tools,
|
|
||||||
knowledge_sources=existing_knowledge_sources,
|
|
||||||
knowledge=copied_knowledge,
|
|
||||||
knowledge_storage=copied_knowledge_storage,
|
|
||||||
)
|
|
||||||
|
|
||||||
return copied_agent
|
return copied_agent
|
||||||
|
|
||||||
@@ -351,6 +304,3 @@ class BaseAgent(ABC, BaseModel):
|
|||||||
if not self._rpm_controller:
|
if not self._rpm_controller:
|
||||||
self._rpm_controller = rpm_controller
|
self._rpm_controller = rpm_controller
|
||||||
self.create_agent_executor()
|
self.create_agent_executor()
|
||||||
|
|
||||||
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
|
|
||||||
pass
|
|
||||||
|
|||||||
@@ -95,34 +95,18 @@ class CrewAgentExecutorMixin:
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
def _ask_human_input(self, final_answer: str) -> str:
|
def _ask_human_input(self, final_answer: str) -> str:
|
||||||
"""Prompt human input with mode-appropriate messaging."""
|
"""Prompt human input for final decision making."""
|
||||||
self._printer.print(
|
self._printer.print(
|
||||||
content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
|
content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
|
||||||
)
|
)
|
||||||
|
|
||||||
# Training mode prompt (single iteration)
|
self._printer.print(
|
||||||
if self.crew and getattr(self.crew, "_train", False):
|
content=(
|
||||||
prompt = (
|
|
||||||
"\n\n=====\n"
|
"\n\n=====\n"
|
||||||
"## TRAINING MODE: Provide feedback to improve the agent's performance.\n"
|
"## Please provide feedback on the Final Result and the Agent's actions. "
|
||||||
"This will be used to train better versions of the agent.\n"
|
"Respond with 'looks good' or a similar phrase when you're satisfied.\n"
|
||||||
"Please provide detailed feedback about the result quality and reasoning process.\n"
|
|
||||||
"=====\n"
|
"=====\n"
|
||||||
)
|
),
|
||||||
# Regular human-in-the-loop prompt (multiple iterations)
|
color="bold_yellow",
|
||||||
else:
|
)
|
||||||
prompt = (
|
return input()
|
||||||
"\n\n=====\n"
|
|
||||||
"## HUMAN FEEDBACK: Provide feedback on the Final Result and Agent's actions.\n"
|
|
||||||
"Please follow these guidelines:\n"
|
|
||||||
" - If you are happy with the result, simply hit Enter without typing anything.\n"
|
|
||||||
" - Otherwise, provide specific improvement requests.\n"
|
|
||||||
" - You can provide multiple rounds of feedback until satisfied.\n"
|
|
||||||
"=====\n"
|
|
||||||
)
|
|
||||||
|
|
||||||
self._printer.print(content=prompt, color="bold_yellow")
|
|
||||||
response = input()
|
|
||||||
if response.strip() != "":
|
|
||||||
self._printer.print(content="\nProcessing your feedback...", color="cyan")
|
|
||||||
return response
|
|
||||||
|
|||||||
@@ -25,17 +25,17 @@ class OutputConverter(BaseModel, ABC):
|
|||||||
llm: Any = Field(description="The language model to be used to convert the text.")
|
llm: Any = Field(description="The language model to be used to convert the text.")
|
||||||
model: Any = Field(description="The model to be used to convert the text.")
|
model: Any = Field(description="The model to be used to convert the text.")
|
||||||
instructions: str = Field(description="Conversion instructions to the LLM.")
|
instructions: str = Field(description="Conversion instructions to the LLM.")
|
||||||
max_attempts: int = Field(
|
max_attempts: Optional[int] = Field(
|
||||||
description="Max number of attempts to try to get the output formatted.",
|
description="Max number of attempts to try to get the output formatted.",
|
||||||
default=3,
|
default=3,
|
||||||
)
|
)
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def to_pydantic(self, current_attempt=1) -> BaseModel:
|
def to_pydantic(self, current_attempt=1):
|
||||||
"""Convert text to pydantic."""
|
"""Convert text to pydantic."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def to_json(self, current_attempt=1) -> dict:
|
def to_json(self, current_attempt=1):
|
||||||
"""Convert text to json."""
|
"""Convert text to json."""
|
||||||
pass
|
pass
|
||||||
|
|||||||
@@ -2,26 +2,25 @@ from crewai.types.usage_metrics import UsageMetrics
|
|||||||
|
|
||||||
|
|
||||||
class TokenProcess:
|
class TokenProcess:
|
||||||
def __init__(self) -> None:
|
total_tokens: int = 0
|
||||||
self.total_tokens: int = 0
|
prompt_tokens: int = 0
|
||||||
self.prompt_tokens: int = 0
|
cached_prompt_tokens: int = 0
|
||||||
self.cached_prompt_tokens: int = 0
|
completion_tokens: int = 0
|
||||||
self.completion_tokens: int = 0
|
successful_requests: int = 0
|
||||||
self.successful_requests: int = 0
|
|
||||||
|
|
||||||
def sum_prompt_tokens(self, tokens: int) -> None:
|
def sum_prompt_tokens(self, tokens: int):
|
||||||
self.prompt_tokens += tokens
|
self.prompt_tokens = self.prompt_tokens + tokens
|
||||||
self.total_tokens += tokens
|
self.total_tokens = self.total_tokens + tokens
|
||||||
|
|
||||||
def sum_completion_tokens(self, tokens: int) -> None:
|
def sum_completion_tokens(self, tokens: int):
|
||||||
self.completion_tokens += tokens
|
self.completion_tokens = self.completion_tokens + tokens
|
||||||
self.total_tokens += tokens
|
self.total_tokens = self.total_tokens + tokens
|
||||||
|
|
||||||
def sum_cached_prompt_tokens(self, tokens: int) -> None:
|
def sum_cached_prompt_tokens(self, tokens: int):
|
||||||
self.cached_prompt_tokens += tokens
|
self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
|
||||||
|
|
||||||
def sum_successful_requests(self, requests: int) -> None:
|
def sum_successful_requests(self, requests: int):
|
||||||
self.successful_requests += requests
|
self.successful_requests = self.successful_requests + requests
|
||||||
|
|
||||||
def get_summary(self) -> UsageMetrics:
|
def get_summary(self) -> UsageMetrics:
|
||||||
return UsageMetrics(
|
return UsageMetrics(
|
||||||
|
|||||||
@@ -13,17 +13,10 @@ from crewai.agents.parser import (
|
|||||||
OutputParserException,
|
OutputParserException,
|
||||||
)
|
)
|
||||||
from crewai.agents.tools_handler import ToolsHandler
|
from crewai.agents.tools_handler import ToolsHandler
|
||||||
from crewai.llm import LLM
|
|
||||||
from crewai.tools.base_tool import BaseTool
|
from crewai.tools.base_tool import BaseTool
|
||||||
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
|
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
|
||||||
from crewai.utilities import I18N, Printer
|
from crewai.utilities import I18N, Printer
|
||||||
from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
|
from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
|
||||||
from crewai.utilities.events import (
|
|
||||||
ToolUsageErrorEvent,
|
|
||||||
ToolUsageStartedEvent,
|
|
||||||
crewai_event_bus,
|
|
||||||
)
|
|
||||||
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
|
|
||||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||||
LLMContextLengthExceededException,
|
LLMContextLengthExceededException,
|
||||||
)
|
)
|
||||||
@@ -61,7 +54,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
callbacks: List[Any] = [],
|
callbacks: List[Any] = [],
|
||||||
):
|
):
|
||||||
self._i18n: I18N = I18N()
|
self._i18n: I18N = I18N()
|
||||||
self.llm: LLM = llm
|
self.llm = llm
|
||||||
self.task = task
|
self.task = task
|
||||||
self.agent = agent
|
self.agent = agent
|
||||||
self.crew = crew
|
self.crew = crew
|
||||||
@@ -87,8 +80,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
self.tool_name_to_tool_map: Dict[str, BaseTool] = {
|
self.tool_name_to_tool_map: Dict[str, BaseTool] = {
|
||||||
tool.name: tool for tool in self.tools
|
tool.name: tool for tool in self.tools
|
||||||
}
|
}
|
||||||
self.stop = stop_words
|
if self.llm.stop:
|
||||||
self.llm.stop = list(set(self.llm.stop + self.stop))
|
self.llm.stop = list(set(self.llm.stop + self.stop))
|
||||||
|
else:
|
||||||
|
self.llm.stop = self.stop
|
||||||
|
|
||||||
def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
|
def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
|
||||||
if "system" in self.prompt:
|
if "system" in self.prompt:
|
||||||
@@ -103,22 +98,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
self._show_start_logs()
|
self._show_start_logs()
|
||||||
|
|
||||||
self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
|
self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
|
||||||
|
formatted_answer = self._invoke_loop()
|
||||||
try:
|
|
||||||
formatted_answer = self._invoke_loop()
|
|
||||||
except AssertionError:
|
|
||||||
self._printer.print(
|
|
||||||
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
raise
|
|
||||||
except Exception as e:
|
|
||||||
self._handle_unknown_error(e)
|
|
||||||
if e.__class__.__module__.startswith("litellm"):
|
|
||||||
# Do not retry on litellm errors
|
|
||||||
raise e
|
|
||||||
else:
|
|
||||||
raise e
|
|
||||||
|
|
||||||
if self.ask_for_human_input:
|
if self.ask_for_human_input:
|
||||||
formatted_answer = self._handle_human_feedback(formatted_answer)
|
formatted_answer = self._handle_human_feedback(formatted_answer)
|
||||||
@@ -127,7 +107,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
self._create_long_term_memory(formatted_answer)
|
self._create_long_term_memory(formatted_answer)
|
||||||
return {"output": formatted_answer.output}
|
return {"output": formatted_answer.output}
|
||||||
|
|
||||||
def _invoke_loop(self) -> AgentFinish:
|
def _invoke_loop(self):
|
||||||
"""
|
"""
|
||||||
Main loop to invoke the agent's thought process until it reaches a conclusion
|
Main loop to invoke the agent's thought process until it reaches a conclusion
|
||||||
or the maximum number of iterations is reached.
|
or the maximum number of iterations is reached.
|
||||||
@@ -144,6 +124,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
self._enforce_rpm_limit()
|
self._enforce_rpm_limit()
|
||||||
|
|
||||||
answer = self._get_llm_response()
|
answer = self._get_llm_response()
|
||||||
|
|
||||||
formatted_answer = self._process_llm_response(answer)
|
formatted_answer = self._process_llm_response(answer)
|
||||||
|
|
||||||
if isinstance(formatted_answer, AgentAction):
|
if isinstance(formatted_answer, AgentAction):
|
||||||
@@ -161,37 +142,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
formatted_answer = self._handle_output_parser_exception(e)
|
formatted_answer = self._handle_output_parser_exception(e)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if e.__class__.__module__.startswith("litellm"):
|
|
||||||
# Do not retry on litellm errors
|
|
||||||
raise e
|
|
||||||
if self._is_context_length_exceeded(e):
|
if self._is_context_length_exceeded(e):
|
||||||
self._handle_context_length()
|
self._handle_context_length()
|
||||||
continue
|
continue
|
||||||
else:
|
|
||||||
self._handle_unknown_error(e)
|
|
||||||
raise e
|
|
||||||
finally:
|
|
||||||
self.iterations += 1
|
|
||||||
|
|
||||||
# During the invoke loop, formatted_answer alternates between AgentAction
|
|
||||||
# (when the agent is using tools) and eventually becomes AgentFinish
|
|
||||||
# (when the agent reaches a final answer). This assertion confirms we've
|
|
||||||
# reached a final answer and helps type checking understand this transition.
|
|
||||||
assert isinstance(formatted_answer, AgentFinish)
|
|
||||||
self._show_logs(formatted_answer)
|
self._show_logs(formatted_answer)
|
||||||
return formatted_answer
|
return formatted_answer
|
||||||
|
|
||||||
def _handle_unknown_error(self, exception: Exception) -> None:
|
|
||||||
"""Handle unknown errors by informing the user."""
|
|
||||||
self._printer.print(
|
|
||||||
content="An unknown error occurred. Please check the details below.",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
self._printer.print(
|
|
||||||
content=f"Error details: {exception}",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
|
|
||||||
def _has_reached_max_iterations(self) -> bool:
|
def _has_reached_max_iterations(self) -> bool:
|
||||||
"""Check if the maximum number of iterations has been reached."""
|
"""Check if the maximum number of iterations has been reached."""
|
||||||
return self.iterations >= self.max_iter
|
return self.iterations >= self.max_iter
|
||||||
@@ -203,17 +160,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
|
|
||||||
def _get_llm_response(self) -> str:
|
def _get_llm_response(self) -> str:
|
||||||
"""Call the LLM and return the response, handling any invalid responses."""
|
"""Call the LLM and return the response, handling any invalid responses."""
|
||||||
try:
|
answer = self.llm.call(
|
||||||
answer = self.llm.call(
|
self.messages,
|
||||||
self.messages,
|
callbacks=self.callbacks,
|
||||||
callbacks=self.callbacks,
|
)
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
self._printer.print(
|
|
||||||
content=f"Error during LLM call: {e}",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
raise e
|
|
||||||
|
|
||||||
if not answer:
|
if not answer:
|
||||||
self._printer.print(
|
self._printer.print(
|
||||||
@@ -234,6 +184,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
|
if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
|
||||||
answer = answer.split("Observation:")[0].strip()
|
answer = answer.split("Observation:")[0].strip()
|
||||||
|
|
||||||
|
self.iterations += 1
|
||||||
return self._format_answer(answer)
|
return self._format_answer(answer)
|
||||||
|
|
||||||
def _handle_agent_action(
|
def _handle_agent_action(
|
||||||
@@ -309,11 +260,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
self._printer.print(
|
self._printer.print(
|
||||||
content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
|
content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
|
||||||
)
|
)
|
||||||
description = (
|
|
||||||
getattr(self.task, "description") if self.task else "Not Found"
|
|
||||||
)
|
|
||||||
self._printer.print(
|
self._printer.print(
|
||||||
content=f"\033[95m## Task:\033[00m \033[92m{description}\033[00m"
|
content=f"\033[95m## Task:\033[00m \033[92m{self.task.description}\033[00m"
|
||||||
)
|
)
|
||||||
|
|
||||||
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
|
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
|
||||||
@@ -355,68 +303,40 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult:
|
def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult:
|
||||||
try:
|
tool_usage = ToolUsage(
|
||||||
if self.agent:
|
tools_handler=self.tools_handler,
|
||||||
crewai_event_bus.emit(
|
tools=self.tools,
|
||||||
self,
|
original_tools=self.original_tools,
|
||||||
event=ToolUsageStartedEvent(
|
tools_description=self.tools_description,
|
||||||
agent_key=self.agent.key,
|
tools_names=self.tools_names,
|
||||||
agent_role=self.agent.role,
|
function_calling_llm=self.function_calling_llm,
|
||||||
tool_name=agent_action.tool,
|
task=self.task, # type: ignore[arg-type]
|
||||||
tool_args=agent_action.tool_input,
|
agent=self.agent,
|
||||||
tool_class=agent_action.tool,
|
action=agent_action,
|
||||||
),
|
)
|
||||||
)
|
tool_calling = tool_usage.parse_tool_calling(agent_action.text)
|
||||||
tool_usage = ToolUsage(
|
|
||||||
tools_handler=self.tools_handler,
|
|
||||||
tools=self.tools,
|
|
||||||
original_tools=self.original_tools,
|
|
||||||
tools_description=self.tools_description,
|
|
||||||
tools_names=self.tools_names,
|
|
||||||
function_calling_llm=self.function_calling_llm,
|
|
||||||
task=self.task, # type: ignore[arg-type]
|
|
||||||
agent=self.agent,
|
|
||||||
action=agent_action,
|
|
||||||
)
|
|
||||||
tool_calling = tool_usage.parse_tool_calling(agent_action.text)
|
|
||||||
|
|
||||||
if isinstance(tool_calling, ToolUsageErrorException):
|
if isinstance(tool_calling, ToolUsageErrorException):
|
||||||
tool_result = tool_calling.message
|
tool_result = tool_calling.message
|
||||||
return ToolResult(result=tool_result, result_as_answer=False)
|
return ToolResult(result=tool_result, result_as_answer=False)
|
||||||
else:
|
else:
|
||||||
if tool_calling.tool_name.casefold().strip() in [
|
if tool_calling.tool_name.casefold().strip() in [
|
||||||
name.casefold().strip() for name in self.tool_name_to_tool_map
|
name.casefold().strip() for name in self.tool_name_to_tool_map
|
||||||
] or tool_calling.tool_name.casefold().replace("_", " ") in [
|
] or tool_calling.tool_name.casefold().replace("_", " ") in [
|
||||||
name.casefold().strip() for name in self.tool_name_to_tool_map
|
name.casefold().strip() for name in self.tool_name_to_tool_map
|
||||||
]:
|
]:
|
||||||
tool_result = tool_usage.use(tool_calling, agent_action.text)
|
tool_result = tool_usage.use(tool_calling, agent_action.text)
|
||||||
tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
|
tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
|
||||||
if tool:
|
if tool:
|
||||||
return ToolResult(
|
return ToolResult(
|
||||||
result=tool_result, result_as_answer=tool.result_as_answer
|
result=tool_result, result_as_answer=tool.result_as_answer
|
||||||
)
|
|
||||||
else:
|
|
||||||
tool_result = self._i18n.errors("wrong_tool_name").format(
|
|
||||||
tool=tool_calling.tool_name,
|
|
||||||
tools=", ".join([tool.name.casefold() for tool in self.tools]),
|
|
||||||
)
|
)
|
||||||
return ToolResult(result=tool_result, result_as_answer=False)
|
else:
|
||||||
|
tool_result = self._i18n.errors("wrong_tool_name").format(
|
||||||
except Exception as e:
|
tool=tool_calling.tool_name,
|
||||||
# TODO: drop
|
tools=", ".join([tool.name.casefold() for tool in self.tools]),
|
||||||
if self.agent:
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
event=ToolUsageErrorEvent( # validation error
|
|
||||||
agent_key=self.agent.key,
|
|
||||||
agent_role=self.agent.role,
|
|
||||||
tool_name=agent_action.tool,
|
|
||||||
tool_args=agent_action.tool_input,
|
|
||||||
tool_class=agent_action.tool,
|
|
||||||
error=str(e),
|
|
||||||
),
|
|
||||||
)
|
)
|
||||||
raise e
|
return ToolResult(result=tool_result, result_as_answer=False)
|
||||||
|
|
||||||
def _summarize_messages(self) -> None:
|
def _summarize_messages(self) -> None:
|
||||||
messages_groups = []
|
messages_groups = []
|
||||||
@@ -466,50 +386,58 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def _handle_crew_training_output(
|
def _handle_crew_training_output(
|
||||||
self, result: AgentFinish, human_feedback: Optional[str] = None
|
self, result: AgentFinish, human_feedback: str | None = None
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Handle the process of saving training data."""
|
"""Function to handle the process of the training data."""
|
||||||
agent_id = str(self.agent.id) # type: ignore
|
agent_id = str(self.agent.id) # type: ignore
|
||||||
train_iteration = (
|
|
||||||
getattr(self.crew, "_train_iteration", None) if self.crew else None
|
|
||||||
)
|
|
||||||
|
|
||||||
if train_iteration is None or not isinstance(train_iteration, int):
|
|
||||||
self._printer.print(
|
|
||||||
content="Invalid or missing train iteration. Cannot save training data.",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
return
|
|
||||||
|
|
||||||
|
# Load training data
|
||||||
training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
|
training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
|
||||||
training_data = training_handler.load() or {}
|
training_data = training_handler.load()
|
||||||
|
|
||||||
# Initialize or retrieve agent's training data
|
# Check if training data exists, human input is not requested, and self.crew is valid
|
||||||
agent_training_data = training_data.get(agent_id, {})
|
if training_data and not self.ask_for_human_input:
|
||||||
|
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
|
||||||
if human_feedback is not None:
|
train_iteration = self.crew._train_iteration
|
||||||
# Save initial output and human feedback
|
if agent_id in training_data and isinstance(train_iteration, int):
|
||||||
agent_training_data[train_iteration] = {
|
training_data[agent_id][train_iteration][
|
||||||
"initial_output": result.output,
|
"improved_output"
|
||||||
"human_feedback": human_feedback,
|
] = result.output
|
||||||
}
|
training_handler.save(training_data)
|
||||||
else:
|
else:
|
||||||
# Save improved output
|
self._printer.print(
|
||||||
if train_iteration in agent_training_data:
|
content="Invalid train iteration type or agent_id not in training data.",
|
||||||
agent_training_data[train_iteration]["improved_output"] = result.output
|
color="red",
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
self._printer.print(
|
self._printer.print(
|
||||||
content=(
|
content="Crew is None or does not have _train_iteration attribute.",
|
||||||
f"No existing training data for agent {agent_id} and iteration "
|
|
||||||
f"{train_iteration}. Cannot save improved output."
|
|
||||||
),
|
|
||||||
color="red",
|
color="red",
|
||||||
)
|
)
|
||||||
return
|
|
||||||
|
|
||||||
# Update the training data and save
|
if self.ask_for_human_input and human_feedback is not None:
|
||||||
training_data[agent_id] = agent_training_data
|
training_data = {
|
||||||
training_handler.save(training_data)
|
"initial_output": result.output,
|
||||||
|
"human_feedback": human_feedback,
|
||||||
|
"agent": agent_id,
|
||||||
|
"agent_role": self.agent.role, # type: ignore
|
||||||
|
}
|
||||||
|
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
|
||||||
|
train_iteration = self.crew._train_iteration
|
||||||
|
if isinstance(train_iteration, int):
|
||||||
|
CrewTrainingHandler(TRAINING_DATA_FILE).append(
|
||||||
|
train_iteration, agent_id, training_data
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self._printer.print(
|
||||||
|
content="Invalid train iteration type. Expected int.",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self._printer.print(
|
||||||
|
content="Crew is None or does not have _train_iteration attribute.",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
|
|
||||||
def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
|
def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
|
||||||
prompt = prompt.replace("{input}", inputs["input"])
|
prompt = prompt.replace("{input}", inputs["input"])
|
||||||
@@ -525,85 +453,82 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
return {"role": role, "content": prompt}
|
return {"role": role, "content": prompt}
|
||||||
|
|
||||||
def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
|
def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
|
||||||
"""Handle human feedback with different flows for training vs regular use.
|
"""
|
||||||
|
Handles the human feedback loop, allowing the user to provide feedback
|
||||||
|
on the agent's output and determining if additional iterations are needed.
|
||||||
|
|
||||||
Args:
|
Parameters:
|
||||||
formatted_answer: The initial AgentFinish result to get feedback on
|
formatted_answer (AgentFinish): The initial output from the agent.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
AgentFinish: The final answer after processing feedback
|
AgentFinish: The final output after incorporating human feedback.
|
||||||
"""
|
"""
|
||||||
human_feedback = self._ask_human_input(formatted_answer.output)
|
|
||||||
|
|
||||||
if self._is_training_mode():
|
|
||||||
return self._handle_training_feedback(formatted_answer, human_feedback)
|
|
||||||
|
|
||||||
return self._handle_regular_feedback(formatted_answer, human_feedback)
|
|
||||||
|
|
||||||
def _is_training_mode(self) -> bool:
|
|
||||||
"""Check if crew is in training mode."""
|
|
||||||
return bool(self.crew and self.crew._train)
|
|
||||||
|
|
||||||
def _handle_training_feedback(
|
|
||||||
self, initial_answer: AgentFinish, feedback: str
|
|
||||||
) -> AgentFinish:
|
|
||||||
"""Process feedback for training scenarios with single iteration."""
|
|
||||||
self._handle_crew_training_output(initial_answer, feedback)
|
|
||||||
self.messages.append(
|
|
||||||
self._format_msg(
|
|
||||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
improved_answer = self._invoke_loop()
|
|
||||||
self._handle_crew_training_output(improved_answer)
|
|
||||||
self.ask_for_human_input = False
|
|
||||||
return improved_answer
|
|
||||||
|
|
||||||
def _handle_regular_feedback(
|
|
||||||
self, current_answer: AgentFinish, initial_feedback: str
|
|
||||||
) -> AgentFinish:
|
|
||||||
"""Process feedback for regular use with potential multiple iterations."""
|
|
||||||
feedback = initial_feedback
|
|
||||||
answer = current_answer
|
|
||||||
|
|
||||||
while self.ask_for_human_input:
|
while self.ask_for_human_input:
|
||||||
# If the user provides a blank response, assume they are happy with the result
|
human_feedback = self._ask_human_input(formatted_answer.output)
|
||||||
if feedback.strip() == "":
|
|
||||||
|
if self.crew and self.crew._train:
|
||||||
|
self._handle_crew_training_output(formatted_answer, human_feedback)
|
||||||
|
|
||||||
|
# Make an LLM call to verify if additional changes are requested based on human feedback
|
||||||
|
additional_changes_prompt = self._i18n.slice(
|
||||||
|
"human_feedback_classification"
|
||||||
|
).format(feedback=human_feedback)
|
||||||
|
|
||||||
|
retry_count = 0
|
||||||
|
llm_call_successful = False
|
||||||
|
additional_changes_response = None
|
||||||
|
|
||||||
|
while retry_count < MAX_LLM_RETRY and not llm_call_successful:
|
||||||
|
try:
|
||||||
|
additional_changes_response = (
|
||||||
|
self.llm.call(
|
||||||
|
[
|
||||||
|
self._format_msg(
|
||||||
|
additional_changes_prompt, role="system"
|
||||||
|
)
|
||||||
|
],
|
||||||
|
callbacks=self.callbacks,
|
||||||
|
)
|
||||||
|
.strip()
|
||||||
|
.lower()
|
||||||
|
)
|
||||||
|
llm_call_successful = True
|
||||||
|
except Exception as e:
|
||||||
|
retry_count += 1
|
||||||
|
|
||||||
|
self._printer.print(
|
||||||
|
content=f"Error during LLM call to classify human feedback: {e}. Retrying... ({retry_count}/{MAX_LLM_RETRY})",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
|
|
||||||
|
if not llm_call_successful:
|
||||||
|
self._printer.print(
|
||||||
|
content="Error processing feedback after multiple attempts.",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
self.ask_for_human_input = False
|
self.ask_for_human_input = False
|
||||||
|
break
|
||||||
|
|
||||||
|
if additional_changes_response == "false":
|
||||||
|
self.ask_for_human_input = False
|
||||||
|
elif additional_changes_response == "true":
|
||||||
|
self.ask_for_human_input = True
|
||||||
|
# Add human feedback to messages
|
||||||
|
self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
|
||||||
|
# Invoke the loop again with updated messages
|
||||||
|
formatted_answer = self._invoke_loop()
|
||||||
|
|
||||||
|
if self.crew and self.crew._train:
|
||||||
|
self._handle_crew_training_output(formatted_answer)
|
||||||
else:
|
else:
|
||||||
answer = self._process_feedback_iteration(feedback)
|
# Unexpected response
|
||||||
feedback = self._ask_human_input(answer.output)
|
self._printer.print(
|
||||||
|
content=f"Unexpected response from LLM: '{additional_changes_response}'. Assuming no additional changes requested.",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
|
self.ask_for_human_input = False
|
||||||
|
|
||||||
return answer
|
return formatted_answer
|
||||||
|
|
||||||
def _process_feedback_iteration(self, feedback: str) -> AgentFinish:
|
|
||||||
"""Process a single feedback iteration."""
|
|
||||||
self.messages.append(
|
|
||||||
self._format_msg(
|
|
||||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
return self._invoke_loop()
|
|
||||||
|
|
||||||
def _log_feedback_error(self, retry_count: int, error: Exception) -> None:
|
|
||||||
"""Log feedback processing errors."""
|
|
||||||
self._printer.print(
|
|
||||||
content=(
|
|
||||||
f"Error processing feedback: {error}. "
|
|
||||||
f"Retrying... ({retry_count + 1}/{MAX_LLM_RETRY})"
|
|
||||||
),
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
|
|
||||||
def _log_max_retries_exceeded(self) -> None:
|
|
||||||
"""Log when max retries for feedback processing are exceeded."""
|
|
||||||
self._printer.print(
|
|
||||||
content=(
|
|
||||||
f"Failed to process feedback after {MAX_LLM_RETRY} attempts. "
|
|
||||||
"Ending feedback loop."
|
|
||||||
),
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
|
|
||||||
def _handle_max_iterations_exceeded(self, formatted_answer):
|
def _handle_max_iterations_exceeded(self, formatted_answer):
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -94,13 +94,6 @@ class CrewAgentParser:
|
|||||||
|
|
||||||
elif includes_answer:
|
elif includes_answer:
|
||||||
final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
|
final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
|
||||||
# Check whether the final answer ends with triple backticks.
|
|
||||||
if final_answer.endswith("```"):
|
|
||||||
# Count occurrences of triple backticks in the final answer.
|
|
||||||
count = final_answer.count("```")
|
|
||||||
# If count is odd then it's an unmatched trailing set; remove it.
|
|
||||||
if count % 2 != 0:
|
|
||||||
final_answer = final_answer[:-3].rstrip()
|
|
||||||
return AgentFinish(thought, final_answer, text)
|
return AgentFinish(thought, final_answer, text)
|
||||||
|
|
||||||
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
|
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
|
||||||
@@ -127,10 +120,7 @@ class CrewAgentParser:
|
|||||||
regex = r"(.*?)(?:\n\nAction|\n\nFinal Answer)"
|
regex = r"(.*?)(?:\n\nAction|\n\nFinal Answer)"
|
||||||
thought_match = re.search(regex, text, re.DOTALL)
|
thought_match = re.search(regex, text, re.DOTALL)
|
||||||
if thought_match:
|
if thought_match:
|
||||||
thought = thought_match.group(1).strip()
|
return thought_match.group(1).strip()
|
||||||
# Remove any triple backticks from the thought string
|
|
||||||
thought = thought.replace("```", "").strip()
|
|
||||||
return thought
|
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
def _clean_action(self, text: str) -> str:
|
def _clean_action(self, text: str) -> str:
|
||||||
|
|||||||
@@ -350,10 +350,7 @@ def chat():
|
|||||||
Start a conversation with the Crew, collecting user-supplied inputs,
|
Start a conversation with the Crew, collecting user-supplied inputs,
|
||||||
and using the Chat LLM to generate responses.
|
and using the Chat LLM to generate responses.
|
||||||
"""
|
"""
|
||||||
click.secho(
|
click.echo("Starting a conversation with the Crew")
|
||||||
"\nStarting a conversation with the Crew\n" "Type 'exit' or Ctrl+C to quit.\n",
|
|
||||||
)
|
|
||||||
|
|
||||||
run_chat()
|
run_chat()
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -216,43 +216,10 @@ MODELS = {
|
|||||||
"watsonx/ibm/granite-3-8b-instruct",
|
"watsonx/ibm/granite-3-8b-instruct",
|
||||||
],
|
],
|
||||||
"bedrock": [
|
"bedrock": [
|
||||||
"bedrock/us.amazon.nova-pro-v1:0",
|
|
||||||
"bedrock/us.amazon.nova-micro-v1:0",
|
|
||||||
"bedrock/us.amazon.nova-lite-v1:0",
|
|
||||||
"bedrock/us.anthropic.claude-3-5-sonnet-20240620-v1:0",
|
|
||||||
"bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0",
|
|
||||||
"bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
|
|
||||||
"bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
|
|
||||||
"bedrock/us.anthropic.claude-3-sonnet-20240229-v1:0",
|
|
||||||
"bedrock/us.anthropic.claude-3-opus-20240229-v1:0",
|
|
||||||
"bedrock/us.anthropic.claude-3-haiku-20240307-v1:0",
|
|
||||||
"bedrock/us.meta.llama3-2-11b-instruct-v1:0",
|
|
||||||
"bedrock/us.meta.llama3-2-3b-instruct-v1:0",
|
|
||||||
"bedrock/us.meta.llama3-2-90b-instruct-v1:0",
|
|
||||||
"bedrock/us.meta.llama3-2-1b-instruct-v1:0",
|
|
||||||
"bedrock/us.meta.llama3-1-8b-instruct-v1:0",
|
|
||||||
"bedrock/us.meta.llama3-1-70b-instruct-v1:0",
|
|
||||||
"bedrock/us.meta.llama3-3-70b-instruct-v1:0",
|
|
||||||
"bedrock/us.meta.llama3-1-405b-instruct-v1:0",
|
|
||||||
"bedrock/eu.anthropic.claude-3-5-sonnet-20240620-v1:0",
|
|
||||||
"bedrock/eu.anthropic.claude-3-sonnet-20240229-v1:0",
|
|
||||||
"bedrock/eu.anthropic.claude-3-haiku-20240307-v1:0",
|
|
||||||
"bedrock/eu.meta.llama3-2-3b-instruct-v1:0",
|
|
||||||
"bedrock/eu.meta.llama3-2-1b-instruct-v1:0",
|
|
||||||
"bedrock/apac.anthropic.claude-3-5-sonnet-20240620-v1:0",
|
|
||||||
"bedrock/apac.anthropic.claude-3-5-sonnet-20241022-v2:0",
|
|
||||||
"bedrock/apac.anthropic.claude-3-sonnet-20240229-v1:0",
|
|
||||||
"bedrock/apac.anthropic.claude-3-haiku-20240307-v1:0",
|
|
||||||
"bedrock/amazon.nova-pro-v1:0",
|
|
||||||
"bedrock/amazon.nova-micro-v1:0",
|
|
||||||
"bedrock/amazon.nova-lite-v1:0",
|
|
||||||
"bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
|
"bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
|
||||||
"bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
|
|
||||||
"bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
|
|
||||||
"bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0",
|
|
||||||
"bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
"bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||||
"bedrock/anthropic.claude-3-opus-20240229-v1:0",
|
|
||||||
"bedrock/anthropic.claude-3-haiku-20240307-v1:0",
|
"bedrock/anthropic.claude-3-haiku-20240307-v1:0",
|
||||||
|
"bedrock/anthropic.claude-3-opus-20240229-v1:0",
|
||||||
"bedrock/anthropic.claude-v2:1",
|
"bedrock/anthropic.claude-v2:1",
|
||||||
"bedrock/anthropic.claude-v2",
|
"bedrock/anthropic.claude-v2",
|
||||||
"bedrock/anthropic.claude-instant-v1",
|
"bedrock/anthropic.claude-instant-v1",
|
||||||
@@ -267,6 +234,8 @@ MODELS = {
|
|||||||
"bedrock/ai21.j2-mid-v1",
|
"bedrock/ai21.j2-mid-v1",
|
||||||
"bedrock/ai21.j2-ultra-v1",
|
"bedrock/ai21.j2-ultra-v1",
|
||||||
"bedrock/ai21.jamba-instruct-v1:0",
|
"bedrock/ai21.jamba-instruct-v1:0",
|
||||||
|
"bedrock/meta.llama2-13b-chat-v1",
|
||||||
|
"bedrock/meta.llama2-70b-chat-v1",
|
||||||
"bedrock/mistral.mistral-7b-instruct-v0:2",
|
"bedrock/mistral.mistral-7b-instruct-v0:2",
|
||||||
"bedrock/mistral.mixtral-8x7b-instruct-v0:1",
|
"bedrock/mistral.mixtral-8x7b-instruct-v0:1",
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -1,52 +1,17 @@
|
|||||||
import json
|
import json
|
||||||
import platform
|
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
import threading
|
|
||||||
import time
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Any, Dict, List, Optional, Set, Tuple
|
from typing import Any, Dict, List, Optional, Set, Tuple
|
||||||
|
|
||||||
import click
|
import click
|
||||||
import tomli
|
import tomli
|
||||||
from packaging import version
|
|
||||||
|
|
||||||
from crewai.cli.utils import read_toml
|
|
||||||
from crewai.cli.version import get_crewai_version
|
|
||||||
from crewai.crew import Crew
|
from crewai.crew import Crew
|
||||||
from crewai.llm import LLM
|
from crewai.llm import LLM
|
||||||
from crewai.types.crew_chat import ChatInputField, ChatInputs
|
from crewai.types.crew_chat import ChatInputField, ChatInputs
|
||||||
from crewai.utilities.llm_utils import create_llm
|
from crewai.utilities.llm_utils import create_llm
|
||||||
|
|
||||||
MIN_REQUIRED_VERSION = "0.98.0"
|
|
||||||
|
|
||||||
|
|
||||||
def check_conversational_crews_version(
|
|
||||||
crewai_version: str, pyproject_data: dict
|
|
||||||
) -> bool:
|
|
||||||
"""
|
|
||||||
Check if the installed crewAI version supports conversational crews.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
crewai_version: The current version of crewAI.
|
|
||||||
pyproject_data: Dictionary containing pyproject.toml data.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
bool: True if version check passes, False otherwise.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
if version.parse(crewai_version) < version.parse(MIN_REQUIRED_VERSION):
|
|
||||||
click.secho(
|
|
||||||
"You are using an older version of crewAI that doesn't support conversational crews. "
|
|
||||||
"Run 'uv upgrade crewai' to get the latest version.",
|
|
||||||
fg="red",
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
except version.InvalidVersion:
|
|
||||||
click.secho("Invalid crewAI version format detected.", fg="red")
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def run_chat():
|
def run_chat():
|
||||||
"""
|
"""
|
||||||
@@ -54,47 +19,20 @@ def run_chat():
|
|||||||
Incorporates crew_name, crew_description, and input fields to build a tool schema.
|
Incorporates crew_name, crew_description, and input fields to build a tool schema.
|
||||||
Exits if crew_name or crew_description are missing.
|
Exits if crew_name or crew_description are missing.
|
||||||
"""
|
"""
|
||||||
crewai_version = get_crewai_version()
|
|
||||||
pyproject_data = read_toml()
|
|
||||||
|
|
||||||
if not check_conversational_crews_version(crewai_version, pyproject_data):
|
|
||||||
return
|
|
||||||
|
|
||||||
crew, crew_name = load_crew_and_name()
|
crew, crew_name = load_crew_and_name()
|
||||||
chat_llm = initialize_chat_llm(crew)
|
chat_llm = initialize_chat_llm(crew)
|
||||||
if not chat_llm:
|
if not chat_llm:
|
||||||
return
|
return
|
||||||
|
|
||||||
# Indicate that the crew is being analyzed
|
crew_chat_inputs = generate_crew_chat_inputs(crew, crew_name, chat_llm)
|
||||||
click.secho(
|
crew_tool_schema = generate_crew_tool_schema(crew_chat_inputs)
|
||||||
"\nAnalyzing crew and required inputs - this may take 3 to 30 seconds "
|
system_message = build_system_message(crew_chat_inputs)
|
||||||
"depending on the complexity of your crew.",
|
|
||||||
fg="white",
|
# Call the LLM to generate the introductory message
|
||||||
|
introductory_message = chat_llm.call(
|
||||||
|
messages=[{"role": "system", "content": system_message}]
|
||||||
)
|
)
|
||||||
|
click.secho(f"\nAssistant: {introductory_message}\n", fg="green")
|
||||||
# Start loading indicator
|
|
||||||
loading_complete = threading.Event()
|
|
||||||
loading_thread = threading.Thread(target=show_loading, args=(loading_complete,))
|
|
||||||
loading_thread.start()
|
|
||||||
|
|
||||||
try:
|
|
||||||
crew_chat_inputs = generate_crew_chat_inputs(crew, crew_name, chat_llm)
|
|
||||||
crew_tool_schema = generate_crew_tool_schema(crew_chat_inputs)
|
|
||||||
system_message = build_system_message(crew_chat_inputs)
|
|
||||||
|
|
||||||
# Call the LLM to generate the introductory message
|
|
||||||
introductory_message = chat_llm.call(
|
|
||||||
messages=[{"role": "system", "content": system_message}]
|
|
||||||
)
|
|
||||||
finally:
|
|
||||||
# Stop loading indicator
|
|
||||||
loading_complete.set()
|
|
||||||
loading_thread.join()
|
|
||||||
|
|
||||||
# Indicate that the analysis is complete
|
|
||||||
click.secho("\nFinished analyzing crew.\n", fg="white")
|
|
||||||
|
|
||||||
click.secho(f"Assistant: {introductory_message}\n", fg="green")
|
|
||||||
|
|
||||||
messages = [
|
messages = [
|
||||||
{"role": "system", "content": system_message},
|
{"role": "system", "content": system_message},
|
||||||
@@ -105,17 +43,15 @@ def run_chat():
|
|||||||
crew_chat_inputs.crew_name: create_tool_function(crew, messages),
|
crew_chat_inputs.crew_name: create_tool_function(crew, messages),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
click.secho(
|
||||||
|
"\nEntering an interactive chat loop with function-calling.\n"
|
||||||
|
"Type 'exit' or Ctrl+C to quit.\n",
|
||||||
|
fg="cyan",
|
||||||
|
)
|
||||||
|
|
||||||
chat_loop(chat_llm, messages, crew_tool_schema, available_functions)
|
chat_loop(chat_llm, messages, crew_tool_schema, available_functions)
|
||||||
|
|
||||||
|
|
||||||
def show_loading(event: threading.Event):
|
|
||||||
"""Display animated loading dots while processing."""
|
|
||||||
while not event.is_set():
|
|
||||||
print(".", end="", flush=True)
|
|
||||||
time.sleep(1)
|
|
||||||
print()
|
|
||||||
|
|
||||||
|
|
||||||
def initialize_chat_llm(crew: Crew) -> Optional[LLM]:
|
def initialize_chat_llm(crew: Crew) -> Optional[LLM]:
|
||||||
"""Initializes the chat LLM and handles exceptions."""
|
"""Initializes the chat LLM and handles exceptions."""
|
||||||
try:
|
try:
|
||||||
@@ -149,7 +85,7 @@ def build_system_message(crew_chat_inputs: ChatInputs) -> str:
|
|||||||
"Please keep your responses concise and friendly. "
|
"Please keep your responses concise and friendly. "
|
||||||
"If a user asks a question outside the crew's scope, provide a brief answer and remind them of the crew's purpose. "
|
"If a user asks a question outside the crew's scope, provide a brief answer and remind them of the crew's purpose. "
|
||||||
"After calling the tool, be prepared to take user feedback and make adjustments as needed. "
|
"After calling the tool, be prepared to take user feedback and make adjustments as needed. "
|
||||||
"If you are ever unsure about a user's request or need clarification, ask the user for more information. "
|
"If you are ever unsure about a user's request or need clarification, ask the user for more information."
|
||||||
"Before doing anything else, introduce yourself with a friendly message like: 'Hey! I'm here to help you with [crew's purpose]. Could you please provide me with [inputs] so we can get started?' "
|
"Before doing anything else, introduce yourself with a friendly message like: 'Hey! I'm here to help you with [crew's purpose]. Could you please provide me with [inputs] so we can get started?' "
|
||||||
"For example: 'Hey! I'm here to help you with uncovering and reporting cutting-edge developments through thorough research and detailed analysis. Could you please provide me with a topic you're interested in? This will help us generate a comprehensive research report and detailed analysis.'"
|
"For example: 'Hey! I'm here to help you with uncovering and reporting cutting-edge developments through thorough research and detailed analysis. Could you please provide me with a topic you're interested in? This will help us generate a comprehensive research report and detailed analysis.'"
|
||||||
f"\nCrew Name: {crew_chat_inputs.crew_name}"
|
f"\nCrew Name: {crew_chat_inputs.crew_name}"
|
||||||
@@ -166,33 +102,25 @@ def create_tool_function(crew: Crew, messages: List[Dict[str, str]]) -> Any:
|
|||||||
return run_crew_tool_with_messages
|
return run_crew_tool_with_messages
|
||||||
|
|
||||||
|
|
||||||
def flush_input():
|
|
||||||
"""Flush any pending input from the user."""
|
|
||||||
if platform.system() == "Windows":
|
|
||||||
# Windows platform
|
|
||||||
import msvcrt
|
|
||||||
|
|
||||||
while msvcrt.kbhit():
|
|
||||||
msvcrt.getch()
|
|
||||||
else:
|
|
||||||
# Unix-like platforms (Linux, macOS)
|
|
||||||
import termios
|
|
||||||
|
|
||||||
termios.tcflush(sys.stdin, termios.TCIFLUSH)
|
|
||||||
|
|
||||||
|
|
||||||
def chat_loop(chat_llm, messages, crew_tool_schema, available_functions):
|
def chat_loop(chat_llm, messages, crew_tool_schema, available_functions):
|
||||||
"""Main chat loop for interacting with the user."""
|
"""Main chat loop for interacting with the user."""
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
# Flush any pending input before accepting new input
|
user_input = click.prompt("You", type=str)
|
||||||
flush_input()
|
if user_input.strip().lower() in ["exit", "quit"]:
|
||||||
|
click.echo("Exiting chat. Goodbye!")
|
||||||
|
break
|
||||||
|
|
||||||
user_input = get_user_input()
|
messages.append({"role": "user", "content": user_input})
|
||||||
handle_user_input(
|
final_response = chat_llm.call(
|
||||||
user_input, chat_llm, messages, crew_tool_schema, available_functions
|
messages=messages,
|
||||||
|
tools=[crew_tool_schema],
|
||||||
|
available_functions=available_functions,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
messages.append({"role": "assistant", "content": final_response})
|
||||||
|
click.secho(f"\nAssistant: {final_response}\n", fg="green")
|
||||||
|
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
click.echo("\nExiting chat. Goodbye!")
|
click.echo("\nExiting chat. Goodbye!")
|
||||||
break
|
break
|
||||||
@@ -201,55 +129,6 @@ def chat_loop(chat_llm, messages, crew_tool_schema, available_functions):
|
|||||||
break
|
break
|
||||||
|
|
||||||
|
|
||||||
def get_user_input() -> str:
|
|
||||||
"""Collect multi-line user input with exit handling."""
|
|
||||||
click.secho(
|
|
||||||
"\nYou (type your message below. Press 'Enter' twice when you're done):",
|
|
||||||
fg="blue",
|
|
||||||
)
|
|
||||||
user_input_lines = []
|
|
||||||
while True:
|
|
||||||
line = input()
|
|
||||||
if line.strip().lower() == "exit":
|
|
||||||
return "exit"
|
|
||||||
if line == "":
|
|
||||||
break
|
|
||||||
user_input_lines.append(line)
|
|
||||||
return "\n".join(user_input_lines)
|
|
||||||
|
|
||||||
|
|
||||||
def handle_user_input(
|
|
||||||
user_input: str,
|
|
||||||
chat_llm: LLM,
|
|
||||||
messages: List[Dict[str, str]],
|
|
||||||
crew_tool_schema: Dict[str, Any],
|
|
||||||
available_functions: Dict[str, Any],
|
|
||||||
) -> None:
|
|
||||||
if user_input.strip().lower() == "exit":
|
|
||||||
click.echo("Exiting chat. Goodbye!")
|
|
||||||
return
|
|
||||||
|
|
||||||
if not user_input.strip():
|
|
||||||
click.echo("Empty message. Please provide input or type 'exit' to quit.")
|
|
||||||
return
|
|
||||||
|
|
||||||
messages.append({"role": "user", "content": user_input})
|
|
||||||
|
|
||||||
# Indicate that assistant is processing
|
|
||||||
click.echo()
|
|
||||||
click.secho("Assistant is processing your input. Please wait...", fg="green")
|
|
||||||
|
|
||||||
# Process assistant's response
|
|
||||||
final_response = chat_llm.call(
|
|
||||||
messages=messages,
|
|
||||||
tools=[crew_tool_schema],
|
|
||||||
available_functions=available_functions,
|
|
||||||
)
|
|
||||||
|
|
||||||
messages.append({"role": "assistant", "content": final_response})
|
|
||||||
click.secho(f"\nAssistant: {final_response}\n", fg="green")
|
|
||||||
|
|
||||||
|
|
||||||
def generate_crew_tool_schema(crew_inputs: ChatInputs) -> dict:
|
def generate_crew_tool_schema(crew_inputs: ChatInputs) -> dict:
|
||||||
"""
|
"""
|
||||||
Dynamically build a Littellm 'function' schema for the given crew.
|
Dynamically build a Littellm 'function' schema for the given crew.
|
||||||
@@ -444,10 +323,10 @@ def generate_input_description_with_ai(input_name: str, crew: Crew, chat_llm) ->
|
|||||||
):
|
):
|
||||||
# Replace placeholders with input names
|
# Replace placeholders with input names
|
||||||
task_description = placeholder_pattern.sub(
|
task_description = placeholder_pattern.sub(
|
||||||
lambda m: m.group(1), task.description or ""
|
lambda m: m.group(1), task.description
|
||||||
)
|
)
|
||||||
expected_output = placeholder_pattern.sub(
|
expected_output = placeholder_pattern.sub(
|
||||||
lambda m: m.group(1), task.expected_output or ""
|
lambda m: m.group(1), task.expected_output
|
||||||
)
|
)
|
||||||
context_texts.append(f"Task Description: {task_description}")
|
context_texts.append(f"Task Description: {task_description}")
|
||||||
context_texts.append(f"Expected Output: {expected_output}")
|
context_texts.append(f"Expected Output: {expected_output}")
|
||||||
@@ -458,10 +337,10 @@ def generate_input_description_with_ai(input_name: str, crew: Crew, chat_llm) ->
|
|||||||
or f"{{{input_name}}}" in agent.backstory
|
or f"{{{input_name}}}" in agent.backstory
|
||||||
):
|
):
|
||||||
# Replace placeholders with input names
|
# Replace placeholders with input names
|
||||||
agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role or "")
|
agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role)
|
||||||
agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal or "")
|
agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal)
|
||||||
agent_backstory = placeholder_pattern.sub(
|
agent_backstory = placeholder_pattern.sub(
|
||||||
lambda m: m.group(1), agent.backstory or ""
|
lambda m: m.group(1), agent.backstory
|
||||||
)
|
)
|
||||||
context_texts.append(f"Agent Role: {agent_role}")
|
context_texts.append(f"Agent Role: {agent_role}")
|
||||||
context_texts.append(f"Agent Goal: {agent_goal}")
|
context_texts.append(f"Agent Goal: {agent_goal}")
|
||||||
@@ -502,20 +381,18 @@ def generate_crew_description_with_ai(crew: Crew, chat_llm) -> str:
|
|||||||
for task in crew.tasks:
|
for task in crew.tasks:
|
||||||
# Replace placeholders with input names
|
# Replace placeholders with input names
|
||||||
task_description = placeholder_pattern.sub(
|
task_description = placeholder_pattern.sub(
|
||||||
lambda m: m.group(1), task.description or ""
|
lambda m: m.group(1), task.description
|
||||||
)
|
)
|
||||||
expected_output = placeholder_pattern.sub(
|
expected_output = placeholder_pattern.sub(
|
||||||
lambda m: m.group(1), task.expected_output or ""
|
lambda m: m.group(1), task.expected_output
|
||||||
)
|
)
|
||||||
context_texts.append(f"Task Description: {task_description}")
|
context_texts.append(f"Task Description: {task_description}")
|
||||||
context_texts.append(f"Expected Output: {expected_output}")
|
context_texts.append(f"Expected Output: {expected_output}")
|
||||||
for agent in crew.agents:
|
for agent in crew.agents:
|
||||||
# Replace placeholders with input names
|
# Replace placeholders with input names
|
||||||
agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role or "")
|
agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role)
|
||||||
agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal or "")
|
agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal)
|
||||||
agent_backstory = placeholder_pattern.sub(
|
agent_backstory = placeholder_pattern.sub(lambda m: m.group(1), agent.backstory)
|
||||||
lambda m: m.group(1), agent.backstory or ""
|
|
||||||
)
|
|
||||||
context_texts.append(f"Agent Role: {agent_role}")
|
context_texts.append(f"Agent Role: {agent_role}")
|
||||||
context_texts.append(f"Agent Goal: {agent_goal}")
|
context_texts.append(f"Agent Goal: {agent_goal}")
|
||||||
context_texts.append(f"Agent Backstory: {agent_backstory}")
|
context_texts.append(f"Agent Backstory: {agent_backstory}")
|
||||||
|
|||||||
@@ -2,7 +2,11 @@ import subprocess
|
|||||||
|
|
||||||
import click
|
import click
|
||||||
|
|
||||||
from crewai.cli.utils import get_crew
|
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
|
||||||
|
from crewai.memory.entity.entity_memory import EntityMemory
|
||||||
|
from crewai.memory.long_term.long_term_memory import LongTermMemory
|
||||||
|
from crewai.memory.short_term.short_term_memory import ShortTermMemory
|
||||||
|
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
|
||||||
|
|
||||||
|
|
||||||
def reset_memories_command(
|
def reset_memories_command(
|
||||||
@@ -26,35 +30,30 @@ def reset_memories_command(
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
try:
|
try:
|
||||||
crew = get_crew()
|
|
||||||
if not crew:
|
|
||||||
raise ValueError("No crew found.")
|
|
||||||
if all:
|
if all:
|
||||||
crew.reset_memories(command_type="all")
|
ShortTermMemory().reset()
|
||||||
|
EntityMemory().reset()
|
||||||
|
LongTermMemory().reset()
|
||||||
|
TaskOutputStorageHandler().reset()
|
||||||
|
KnowledgeStorage().reset()
|
||||||
click.echo("All memories have been reset.")
|
click.echo("All memories have been reset.")
|
||||||
return
|
else:
|
||||||
|
if long:
|
||||||
|
LongTermMemory().reset()
|
||||||
|
click.echo("Long term memory has been reset.")
|
||||||
|
|
||||||
if not any([long, short, entity, kickoff_outputs, knowledge]):
|
if short:
|
||||||
click.echo(
|
ShortTermMemory().reset()
|
||||||
"No memory type specified. Please specify at least one type to reset."
|
click.echo("Short term memory has been reset.")
|
||||||
)
|
if entity:
|
||||||
return
|
EntityMemory().reset()
|
||||||
|
click.echo("Entity memory has been reset.")
|
||||||
if long:
|
if kickoff_outputs:
|
||||||
crew.reset_memories(command_type="long")
|
TaskOutputStorageHandler().reset()
|
||||||
click.echo("Long term memory has been reset.")
|
click.echo("Latest Kickoff outputs stored has been reset.")
|
||||||
if short:
|
if knowledge:
|
||||||
crew.reset_memories(command_type="short")
|
KnowledgeStorage().reset()
|
||||||
click.echo("Short term memory has been reset.")
|
click.echo("Knowledge has been reset.")
|
||||||
if entity:
|
|
||||||
crew.reset_memories(command_type="entity")
|
|
||||||
click.echo("Entity memory has been reset.")
|
|
||||||
if kickoff_outputs:
|
|
||||||
crew.reset_memories(command_type="kickoff_outputs")
|
|
||||||
click.echo("Latest Kickoff outputs stored has been reset.")
|
|
||||||
if knowledge:
|
|
||||||
crew.reset_memories(command_type="knowledge")
|
|
||||||
click.echo("Knowledge has been reset.")
|
|
||||||
|
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
click.echo(f"An error occurred while resetting the memories: {e}", err=True)
|
click.echo(f"An error occurred while resetting the memories: {e}", err=True)
|
||||||
|
|||||||
1
src/crewai/cli/templates/crew/.gitignore
vendored
1
src/crewai/cli/templates/crew/.gitignore
vendored
@@ -1,3 +1,2 @@
|
|||||||
.env
|
.env
|
||||||
__pycache__/
|
__pycache__/
|
||||||
.DS_Store
|
|
||||||
|
|||||||
@@ -56,8 +56,7 @@ def test():
|
|||||||
Test the crew execution and returns the results.
|
Test the crew execution and returns the results.
|
||||||
"""
|
"""
|
||||||
inputs = {
|
inputs = {
|
||||||
"topic": "AI LLMs",
|
"topic": "AI LLMs"
|
||||||
"current_year": str(datetime.now().year)
|
|
||||||
}
|
}
|
||||||
try:
|
try:
|
||||||
{{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
|
{{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
|||||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||||
requires-python = ">=3.10,<3.13"
|
requires-python = ">=3.10,<3.13"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"crewai[tools]>=0.102.0,<1.0.0"
|
"crewai[tools]>=0.95.0,<1.0.0"
|
||||||
]
|
]
|
||||||
|
|
||||||
[project.scripts]
|
[project.scripts]
|
||||||
|
|||||||
1
src/crewai/cli/templates/flow/.gitignore
vendored
1
src/crewai/cli/templates/flow/.gitignore
vendored
@@ -1,4 +1,3 @@
|
|||||||
.env
|
.env
|
||||||
__pycache__/
|
__pycache__/
|
||||||
lib/
|
lib/
|
||||||
.DS_Store
|
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ from random import randint
|
|||||||
|
|
||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
from crewai.flow import Flow, listen, start
|
from crewai.flow.flow import Flow, listen, start
|
||||||
|
|
||||||
from {{folder_name}}.crews.poem_crew.poem_crew import PoemCrew
|
from {{folder_name}}.crews.poem_crew.poem_crew import PoemCrew
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
|||||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||||
requires-python = ">=3.10,<3.13"
|
requires-python = ">=3.10,<3.13"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"crewai[tools]>=0.102.0,<1.0.0",
|
"crewai[tools]>=0.95.0,<1.0.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[project.scripts]
|
[project.scripts]
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
|
|||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
requires-python = ">=3.10,<3.13"
|
requires-python = ">=3.10,<3.13"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"crewai[tools]>=0.102.0"
|
"crewai[tools]>=0.95.0"
|
||||||
]
|
]
|
||||||
|
|
||||||
[tool.crewai]
|
[tool.crewai]
|
||||||
|
|||||||
@@ -9,7 +9,6 @@ import tomli
|
|||||||
from rich.console import Console
|
from rich.console import Console
|
||||||
|
|
||||||
from crewai.cli.constants import ENV_VARS
|
from crewai.cli.constants import ENV_VARS
|
||||||
from crewai.crew import Crew
|
|
||||||
|
|
||||||
if sys.version_info >= (3, 11):
|
if sys.version_info >= (3, 11):
|
||||||
import tomllib
|
import tomllib
|
||||||
@@ -248,66 +247,3 @@ def write_env_file(folder_path, env_vars):
|
|||||||
with open(env_file_path, "w") as file:
|
with open(env_file_path, "w") as file:
|
||||||
for key, value in env_vars.items():
|
for key, value in env_vars.items():
|
||||||
file.write(f"{key}={value}\n")
|
file.write(f"{key}={value}\n")
|
||||||
|
|
||||||
|
|
||||||
def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
|
|
||||||
"""Get the crew instance from the crew.py file."""
|
|
||||||
try:
|
|
||||||
import importlib.util
|
|
||||||
import os
|
|
||||||
|
|
||||||
for root, _, files in os.walk("."):
|
|
||||||
if crew_path in files:
|
|
||||||
crew_os_path = os.path.join(root, crew_path)
|
|
||||||
try:
|
|
||||||
spec = importlib.util.spec_from_file_location(
|
|
||||||
"crew_module", crew_os_path
|
|
||||||
)
|
|
||||||
if not spec or not spec.loader:
|
|
||||||
continue
|
|
||||||
module = importlib.util.module_from_spec(spec)
|
|
||||||
try:
|
|
||||||
sys.modules[spec.name] = module
|
|
||||||
spec.loader.exec_module(module)
|
|
||||||
|
|
||||||
for attr_name in dir(module):
|
|
||||||
attr = getattr(module, attr_name)
|
|
||||||
try:
|
|
||||||
if isinstance(attr, Crew) and hasattr(attr, "kickoff"):
|
|
||||||
print(
|
|
||||||
f"Found valid crew object in attribute '{attr_name}' at {crew_os_path}."
|
|
||||||
)
|
|
||||||
return attr
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error processing attribute {attr_name}: {e}")
|
|
||||||
continue
|
|
||||||
|
|
||||||
except Exception as exec_error:
|
|
||||||
print(f"Error executing module: {exec_error}")
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
print(f"Traceback: {traceback.format_exc()}")
|
|
||||||
|
|
||||||
except (ImportError, AttributeError) as e:
|
|
||||||
if require:
|
|
||||||
console.print(
|
|
||||||
f"Error importing crew from {crew_path}: {str(e)}",
|
|
||||||
style="bold red",
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
|
|
||||||
break
|
|
||||||
|
|
||||||
if require:
|
|
||||||
console.print("No valid Crew instance found in crew.py", style="bold red")
|
|
||||||
raise SystemExit
|
|
||||||
return None
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
if require:
|
|
||||||
console.print(
|
|
||||||
f"Unexpected error while loading crew: {str(e)}", style="bold red"
|
|
||||||
)
|
|
||||||
raise SystemExit
|
|
||||||
return None
|
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import re
|
|||||||
import uuid
|
import uuid
|
||||||
import warnings
|
import warnings
|
||||||
from concurrent.futures import Future
|
from concurrent.futures import Future
|
||||||
from copy import copy as shallow_copy
|
|
||||||
from hashlib import md5
|
from hashlib import md5
|
||||||
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
|
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
|
||||||
|
|
||||||
@@ -35,25 +34,15 @@ from crewai.process import Process
|
|||||||
from crewai.task import Task
|
from crewai.task import Task
|
||||||
from crewai.tasks.conditional_task import ConditionalTask
|
from crewai.tasks.conditional_task import ConditionalTask
|
||||||
from crewai.tasks.task_output import TaskOutput
|
from crewai.tasks.task_output import TaskOutput
|
||||||
|
from crewai.telemetry import Telemetry
|
||||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||||
from crewai.tools.base_tool import Tool
|
from crewai.tools.base_tool import Tool
|
||||||
|
from crewai.types.crew_chat import ChatInputs
|
||||||
from crewai.types.usage_metrics import UsageMetrics
|
from crewai.types.usage_metrics import UsageMetrics
|
||||||
from crewai.utilities import I18N, FileHandler, Logger, RPMController
|
from crewai.utilities import I18N, FileHandler, Logger, RPMController
|
||||||
from crewai.utilities.constants import TRAINING_DATA_FILE
|
from crewai.utilities.constants import TRAINING_DATA_FILE
|
||||||
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
|
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
|
||||||
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
|
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
|
||||||
from crewai.utilities.events.crew_events import (
|
|
||||||
CrewKickoffCompletedEvent,
|
|
||||||
CrewKickoffFailedEvent,
|
|
||||||
CrewKickoffStartedEvent,
|
|
||||||
CrewTestCompletedEvent,
|
|
||||||
CrewTestFailedEvent,
|
|
||||||
CrewTestStartedEvent,
|
|
||||||
CrewTrainCompletedEvent,
|
|
||||||
CrewTrainFailedEvent,
|
|
||||||
CrewTrainStartedEvent,
|
|
||||||
)
|
|
||||||
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
|
||||||
from crewai.utilities.formatter import (
|
from crewai.utilities.formatter import (
|
||||||
aggregate_raw_outputs_from_task_outputs,
|
aggregate_raw_outputs_from_task_outputs,
|
||||||
aggregate_raw_outputs_from_tasks,
|
aggregate_raw_outputs_from_tasks,
|
||||||
@@ -63,6 +52,12 @@ from crewai.utilities.planning_handler import CrewPlanner
|
|||||||
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
|
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
|
||||||
from crewai.utilities.training_handler import CrewTrainingHandler
|
from crewai.utilities.training_handler import CrewTrainingHandler
|
||||||
|
|
||||||
|
try:
|
||||||
|
import agentops # type: ignore
|
||||||
|
except ImportError:
|
||||||
|
agentops = None
|
||||||
|
|
||||||
|
|
||||||
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
|
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
|
||||||
|
|
||||||
|
|
||||||
@@ -89,7 +84,6 @@ class Crew(BaseModel):
|
|||||||
step_callback: Callback to be executed after each step for every agents execution.
|
step_callback: Callback to be executed after each step for every agents execution.
|
||||||
share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
|
share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
|
||||||
planning: Plan the crew execution and add the plan to the crew.
|
planning: Plan the crew execution and add the plan to the crew.
|
||||||
chat_llm: The language model used for orchestrating chat interactions with the crew.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__hash__ = object.__hash__ # type: ignore
|
__hash__ = object.__hash__ # type: ignore
|
||||||
@@ -188,9 +182,9 @@ class Crew(BaseModel):
|
|||||||
default=None,
|
default=None,
|
||||||
description="Path to the prompt json file to be used for the crew.",
|
description="Path to the prompt json file to be used for the crew.",
|
||||||
)
|
)
|
||||||
output_log_file: Optional[Union[bool, str]] = Field(
|
output_log_file: Optional[str] = Field(
|
||||||
default=None,
|
default=None,
|
||||||
description="Path to the log file to be saved",
|
description="output_log_file",
|
||||||
)
|
)
|
||||||
planning: Optional[bool] = Field(
|
planning: Optional[bool] = Field(
|
||||||
default=False,
|
default=False,
|
||||||
@@ -216,9 +210,8 @@ class Crew(BaseModel):
|
|||||||
default=None,
|
default=None,
|
||||||
description="LLM used to handle chatting with the crew.",
|
description="LLM used to handle chatting with the crew.",
|
||||||
)
|
)
|
||||||
knowledge: Optional[Knowledge] = Field(
|
_knowledge: Optional[Knowledge] = PrivateAttr(
|
||||||
default=None,
|
default=None,
|
||||||
description="Knowledge for the crew.",
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@field_validator("id", mode="before")
|
@field_validator("id", mode="before")
|
||||||
@@ -256,6 +249,8 @@ class Crew(BaseModel):
|
|||||||
if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
|
if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
|
||||||
self.function_calling_llm = create_llm(self.function_calling_llm)
|
self.function_calling_llm = create_llm(self.function_calling_llm)
|
||||||
|
|
||||||
|
self._telemetry = Telemetry()
|
||||||
|
self._telemetry.set_tracer()
|
||||||
return self
|
return self
|
||||||
|
|
||||||
@model_validator(mode="after")
|
@model_validator(mode="after")
|
||||||
@@ -278,26 +273,12 @@ class Crew(BaseModel):
|
|||||||
if self.entity_memory
|
if self.entity_memory
|
||||||
else EntityMemory(crew=self, embedder_config=self.embedder)
|
else EntityMemory(crew=self, embedder_config=self.embedder)
|
||||||
)
|
)
|
||||||
if (
|
if hasattr(self, "memory_config") and self.memory_config is not None:
|
||||||
self.memory_config and "user_memory" in self.memory_config
|
self._user_memory = (
|
||||||
): # Check for user_memory in config
|
self.user_memory if self.user_memory else UserMemory(crew=self)
|
||||||
user_memory_config = self.memory_config["user_memory"]
|
)
|
||||||
if isinstance(
|
|
||||||
user_memory_config, UserMemory
|
|
||||||
): # Check if it is already an instance
|
|
||||||
self._user_memory = user_memory_config
|
|
||||||
elif isinstance(
|
|
||||||
user_memory_config, dict
|
|
||||||
): # Check if it's a configuration dict
|
|
||||||
self._user_memory = UserMemory(
|
|
||||||
crew=self, **user_memory_config
|
|
||||||
) # Initialize with config
|
|
||||||
else:
|
|
||||||
raise TypeError(
|
|
||||||
"user_memory must be a UserMemory instance or a configuration dictionary"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
self._user_memory = None # No user memory if not in config
|
self._user_memory = None
|
||||||
return self
|
return self
|
||||||
|
|
||||||
@model_validator(mode="after")
|
@model_validator(mode="after")
|
||||||
@@ -308,9 +289,9 @@ class Crew(BaseModel):
|
|||||||
if isinstance(self.knowledge_sources, list) and all(
|
if isinstance(self.knowledge_sources, list) and all(
|
||||||
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
|
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
|
||||||
):
|
):
|
||||||
self.knowledge = Knowledge(
|
self._knowledge = Knowledge(
|
||||||
sources=self.knowledge_sources,
|
sources=self.knowledge_sources,
|
||||||
embedder=self.embedder,
|
embedder_config=self.embedder,
|
||||||
collection_name="crew",
|
collection_name="crew",
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -397,22 +378,6 @@ class Crew(BaseModel):
|
|||||||
|
|
||||||
return self
|
return self
|
||||||
|
|
||||||
@model_validator(mode="after")
|
|
||||||
def validate_must_have_non_conditional_task(self) -> "Crew":
|
|
||||||
"""Ensure that a crew has at least one non-conditional task."""
|
|
||||||
if not self.tasks:
|
|
||||||
return self
|
|
||||||
non_conditional_count = sum(
|
|
||||||
1 for task in self.tasks if not isinstance(task, ConditionalTask)
|
|
||||||
)
|
|
||||||
if non_conditional_count == 0:
|
|
||||||
raise PydanticCustomError(
|
|
||||||
"only_conditional_tasks",
|
|
||||||
"Crew must include at least one non-conditional task",
|
|
||||||
{},
|
|
||||||
)
|
|
||||||
return self
|
|
||||||
|
|
||||||
@model_validator(mode="after")
|
@model_validator(mode="after")
|
||||||
def validate_first_task(self) -> "Crew":
|
def validate_first_task(self) -> "Crew":
|
||||||
"""Ensure the first task is not a ConditionalTask."""
|
"""Ensure the first task is not a ConditionalTask."""
|
||||||
@@ -524,121 +489,83 @@ class Crew(BaseModel):
|
|||||||
self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
|
self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Trains the crew for a given number of iterations."""
|
"""Trains the crew for a given number of iterations."""
|
||||||
try:
|
train_crew = self.copy()
|
||||||
crewai_event_bus.emit(
|
train_crew._setup_for_training(filename)
|
||||||
self,
|
|
||||||
CrewTrainStartedEvent(
|
|
||||||
crew_name=self.name or "crew",
|
|
||||||
n_iterations=n_iterations,
|
|
||||||
filename=filename,
|
|
||||||
inputs=inputs,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
train_crew = self.copy()
|
|
||||||
train_crew._setup_for_training(filename)
|
|
||||||
|
|
||||||
for n_iteration in range(n_iterations):
|
for n_iteration in range(n_iterations):
|
||||||
train_crew._train_iteration = n_iteration
|
train_crew._train_iteration = n_iteration
|
||||||
train_crew.kickoff(inputs=inputs)
|
train_crew.kickoff(inputs=inputs)
|
||||||
|
|
||||||
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
|
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
|
||||||
|
|
||||||
for agent in train_crew.agents:
|
for agent in train_crew.agents:
|
||||||
if training_data.get(str(agent.id)):
|
if training_data.get(str(agent.id)):
|
||||||
result = TaskEvaluator(agent).evaluate_training_data(
|
result = TaskEvaluator(agent).evaluate_training_data(
|
||||||
training_data=training_data, agent_id=str(agent.id)
|
training_data=training_data, agent_id=str(agent.id)
|
||||||
)
|
)
|
||||||
CrewTrainingHandler(filename).save_trained_data(
|
|
||||||
agent_id=str(agent.role), trained_data=result.model_dump()
|
|
||||||
)
|
|
||||||
|
|
||||||
crewai_event_bus.emit(
|
CrewTrainingHandler(filename).save_trained_data(
|
||||||
self,
|
agent_id=str(agent.role), trained_data=result.model_dump()
|
||||||
CrewTrainCompletedEvent(
|
)
|
||||||
crew_name=self.name or "crew",
|
|
||||||
n_iterations=n_iterations,
|
|
||||||
filename=filename,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
CrewTrainFailedEvent(error=str(e), crew_name=self.name or "crew"),
|
|
||||||
)
|
|
||||||
self._logger.log("error", f"Training failed: {e}", color="red")
|
|
||||||
CrewTrainingHandler(TRAINING_DATA_FILE).clear()
|
|
||||||
CrewTrainingHandler(filename).clear()
|
|
||||||
raise
|
|
||||||
|
|
||||||
def kickoff(
|
def kickoff(
|
||||||
self,
|
self,
|
||||||
inputs: Optional[Dict[str, Any]] = None,
|
inputs: Optional[Dict[str, Any]] = None,
|
||||||
) -> CrewOutput:
|
) -> CrewOutput:
|
||||||
try:
|
for before_callback in self.before_kickoff_callbacks:
|
||||||
for before_callback in self.before_kickoff_callbacks:
|
if inputs is None:
|
||||||
if inputs is None:
|
inputs = {}
|
||||||
inputs = {}
|
inputs = before_callback(inputs)
|
||||||
inputs = before_callback(inputs)
|
|
||||||
|
|
||||||
crewai_event_bus.emit(
|
"""Starts the crew to work on its assigned tasks."""
|
||||||
self,
|
self._execution_span = self._telemetry.crew_execution_span(self, inputs)
|
||||||
CrewKickoffStartedEvent(crew_name=self.name or "crew", inputs=inputs),
|
self._task_output_handler.reset()
|
||||||
|
self._logging_color = "bold_purple"
|
||||||
|
|
||||||
|
if inputs is not None:
|
||||||
|
self._inputs = inputs
|
||||||
|
self._interpolate_inputs(inputs)
|
||||||
|
self._set_tasks_callbacks()
|
||||||
|
|
||||||
|
i18n = I18N(prompt_file=self.prompt_file)
|
||||||
|
|
||||||
|
for agent in self.agents:
|
||||||
|
agent.i18n = i18n
|
||||||
|
# type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
|
||||||
|
agent.crew = self # type: ignore[attr-defined]
|
||||||
|
# TODO: Create an AgentFunctionCalling protocol for future refactoring
|
||||||
|
if not agent.function_calling_llm: # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
|
||||||
|
agent.function_calling_llm = self.function_calling_llm # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
|
||||||
|
|
||||||
|
if not agent.step_callback: # type: ignore # "BaseAgent" has no attribute "step_callback"
|
||||||
|
agent.step_callback = self.step_callback # type: ignore # "BaseAgent" has no attribute "step_callback"
|
||||||
|
|
||||||
|
agent.create_agent_executor()
|
||||||
|
|
||||||
|
if self.planning:
|
||||||
|
self._handle_crew_planning()
|
||||||
|
|
||||||
|
metrics: List[UsageMetrics] = []
|
||||||
|
|
||||||
|
if self.process == Process.sequential:
|
||||||
|
result = self._run_sequential_process()
|
||||||
|
elif self.process == Process.hierarchical:
|
||||||
|
result = self._run_hierarchical_process()
|
||||||
|
else:
|
||||||
|
raise NotImplementedError(
|
||||||
|
f"The process '{self.process}' is not implemented yet."
|
||||||
)
|
)
|
||||||
|
|
||||||
# Starts the crew to work on its assigned tasks.
|
for after_callback in self.after_kickoff_callbacks:
|
||||||
self._task_output_handler.reset()
|
result = after_callback(result)
|
||||||
self._logging_color = "bold_purple"
|
|
||||||
|
|
||||||
if inputs is not None:
|
metrics += [agent._token_process.get_summary() for agent in self.agents]
|
||||||
self._inputs = inputs
|
|
||||||
self._interpolate_inputs(inputs)
|
|
||||||
self._set_tasks_callbacks()
|
|
||||||
|
|
||||||
i18n = I18N(prompt_file=self.prompt_file)
|
self.usage_metrics = UsageMetrics()
|
||||||
|
for metric in metrics:
|
||||||
|
self.usage_metrics.add_usage_metrics(metric)
|
||||||
|
|
||||||
for agent in self.agents:
|
return result
|
||||||
agent.i18n = i18n
|
|
||||||
# type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
|
|
||||||
agent.crew = self # type: ignore[attr-defined]
|
|
||||||
agent.set_knowledge(crew_embedder=self.embedder)
|
|
||||||
# TODO: Create an AgentFunctionCalling protocol for future refactoring
|
|
||||||
if not agent.function_calling_llm: # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
|
|
||||||
agent.function_calling_llm = self.function_calling_llm # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
|
|
||||||
|
|
||||||
if not agent.step_callback: # type: ignore # "BaseAgent" has no attribute "step_callback"
|
|
||||||
agent.step_callback = self.step_callback # type: ignore # "BaseAgent" has no attribute "step_callback"
|
|
||||||
|
|
||||||
agent.create_agent_executor()
|
|
||||||
|
|
||||||
if self.planning:
|
|
||||||
self._handle_crew_planning()
|
|
||||||
|
|
||||||
metrics: List[UsageMetrics] = []
|
|
||||||
|
|
||||||
if self.process == Process.sequential:
|
|
||||||
result = self._run_sequential_process()
|
|
||||||
elif self.process == Process.hierarchical:
|
|
||||||
result = self._run_hierarchical_process()
|
|
||||||
else:
|
|
||||||
raise NotImplementedError(
|
|
||||||
f"The process '{self.process}' is not implemented yet."
|
|
||||||
)
|
|
||||||
|
|
||||||
for after_callback in self.after_kickoff_callbacks:
|
|
||||||
result = after_callback(result)
|
|
||||||
|
|
||||||
metrics += [agent._token_process.get_summary() for agent in self.agents]
|
|
||||||
|
|
||||||
self.usage_metrics = UsageMetrics()
|
|
||||||
for metric in metrics:
|
|
||||||
self.usage_metrics.add_usage_metrics(metric)
|
|
||||||
return result
|
|
||||||
except Exception as e:
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
CrewKickoffFailedEvent(error=str(e), crew_name=self.name or "crew"),
|
|
||||||
)
|
|
||||||
raise
|
|
||||||
|
|
||||||
def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
|
def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
|
||||||
"""Executes the Crew's workflow for each input in the list and aggregates results."""
|
"""Executes the Crew's workflow for each input in the list and aggregates results."""
|
||||||
@@ -747,7 +674,12 @@ class Crew(BaseModel):
|
|||||||
manager.tools = []
|
manager.tools = []
|
||||||
raise Exception("Manager agent should not have tools")
|
raise Exception("Manager agent should not have tools")
|
||||||
else:
|
else:
|
||||||
self.manager_llm = create_llm(self.manager_llm)
|
self.manager_llm = (
|
||||||
|
getattr(self.manager_llm, "model_name", None)
|
||||||
|
or getattr(self.manager_llm, "model", None)
|
||||||
|
or getattr(self.manager_llm, "deployment_name", None)
|
||||||
|
or self.manager_llm
|
||||||
|
)
|
||||||
manager = Agent(
|
manager = Agent(
|
||||||
role=i18n.retrieve("hierarchical_manager_agent", "role"),
|
role=i18n.retrieve("hierarchical_manager_agent", "role"),
|
||||||
goal=i18n.retrieve("hierarchical_manager_agent", "goal"),
|
goal=i18n.retrieve("hierarchical_manager_agent", "goal"),
|
||||||
@@ -807,7 +739,6 @@ class Crew(BaseModel):
|
|||||||
task, task_outputs, futures, task_index, was_replayed
|
task, task_outputs, futures, task_index, was_replayed
|
||||||
)
|
)
|
||||||
if skipped_task_output:
|
if skipped_task_output:
|
||||||
task_outputs.append(skipped_task_output)
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if task.async_execution:
|
if task.async_execution:
|
||||||
@@ -831,7 +762,7 @@ class Crew(BaseModel):
|
|||||||
context=context,
|
context=context,
|
||||||
tools=tools_for_task,
|
tools=tools_for_task,
|
||||||
)
|
)
|
||||||
task_outputs.append(task_output)
|
task_outputs = [task_output]
|
||||||
self._process_task_result(task, task_output)
|
self._process_task_result(task, task_output)
|
||||||
self._store_execution_log(task, task_output, task_index, was_replayed)
|
self._store_execution_log(task, task_output, task_index, was_replayed)
|
||||||
|
|
||||||
@@ -852,7 +783,7 @@ class Crew(BaseModel):
|
|||||||
task_outputs = self._process_async_tasks(futures, was_replayed)
|
task_outputs = self._process_async_tasks(futures, was_replayed)
|
||||||
futures.clear()
|
futures.clear()
|
||||||
|
|
||||||
previous_output = task_outputs[-1] if task_outputs else None
|
previous_output = task_outputs[task_index - 1] if task_outputs else None
|
||||||
if previous_output is not None and not task.should_execute(previous_output):
|
if previous_output is not None and not task.should_execute(previous_output):
|
||||||
self._logger.log(
|
self._logger.log(
|
||||||
"debug",
|
"debug",
|
||||||
@@ -974,29 +905,20 @@ class Crew(BaseModel):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
|
def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
|
||||||
if not task_outputs:
|
if len(task_outputs) != 1:
|
||||||
raise ValueError("No task outputs available to create crew output.")
|
raise ValueError(
|
||||||
|
"Something went wrong. Kickoff should return only one task output."
|
||||||
# Filter out empty outputs and get the last valid one as the main output
|
)
|
||||||
valid_outputs = [t for t in task_outputs if t.raw]
|
final_task_output = task_outputs[0]
|
||||||
if not valid_outputs:
|
|
||||||
raise ValueError("No valid task outputs available to create crew output.")
|
|
||||||
final_task_output = valid_outputs[-1]
|
|
||||||
|
|
||||||
final_string_output = final_task_output.raw
|
final_string_output = final_task_output.raw
|
||||||
self._finish_execution(final_string_output)
|
self._finish_execution(final_string_output)
|
||||||
token_usage = self.calculate_usage_metrics()
|
token_usage = self.calculate_usage_metrics()
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
CrewKickoffCompletedEvent(
|
|
||||||
crew_name=self.name or "crew", output=final_task_output
|
|
||||||
),
|
|
||||||
)
|
|
||||||
return CrewOutput(
|
return CrewOutput(
|
||||||
raw=final_task_output.raw,
|
raw=final_task_output.raw,
|
||||||
pydantic=final_task_output.pydantic,
|
pydantic=final_task_output.pydantic,
|
||||||
json_dict=final_task_output.json_dict,
|
json_dict=final_task_output.json_dict,
|
||||||
tasks_output=task_outputs,
|
tasks_output=[task.output for task in self.tasks if task.output],
|
||||||
token_usage=token_usage,
|
token_usage=token_usage,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -1069,8 +991,8 @@ class Crew(BaseModel):
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
def query_knowledge(self, query: List[str]) -> Union[List[Dict[str, Any]], None]:
|
def query_knowledge(self, query: List[str]) -> Union[List[Dict[str, Any]], None]:
|
||||||
if self.knowledge:
|
if self._knowledge:
|
||||||
return self.knowledge.query(query)
|
return self._knowledge.query(query)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def fetch_inputs(self) -> Set[str]:
|
def fetch_inputs(self) -> Set[str]:
|
||||||
@@ -1111,10 +1033,9 @@ class Crew(BaseModel):
|
|||||||
"_short_term_memory",
|
"_short_term_memory",
|
||||||
"_long_term_memory",
|
"_long_term_memory",
|
||||||
"_entity_memory",
|
"_entity_memory",
|
||||||
|
"_telemetry",
|
||||||
"agents",
|
"agents",
|
||||||
"tasks",
|
"tasks",
|
||||||
"knowledge_sources",
|
|
||||||
"knowledge",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cloned_agents = [agent.copy() for agent in self.agents]
|
cloned_agents = [agent.copy() for agent in self.agents]
|
||||||
@@ -1122,9 +1043,6 @@ class Crew(BaseModel):
|
|||||||
task_mapping = {}
|
task_mapping = {}
|
||||||
|
|
||||||
cloned_tasks = []
|
cloned_tasks = []
|
||||||
existing_knowledge_sources = shallow_copy(self.knowledge_sources)
|
|
||||||
existing_knowledge = shallow_copy(self.knowledge)
|
|
||||||
|
|
||||||
for task in self.tasks:
|
for task in self.tasks:
|
||||||
cloned_task = task.copy(cloned_agents, task_mapping)
|
cloned_task = task.copy(cloned_agents, task_mapping)
|
||||||
cloned_tasks.append(cloned_task)
|
cloned_tasks.append(cloned_task)
|
||||||
@@ -1144,13 +1062,7 @@ class Crew(BaseModel):
|
|||||||
copied_data.pop("agents", None)
|
copied_data.pop("agents", None)
|
||||||
copied_data.pop("tasks", None)
|
copied_data.pop("tasks", None)
|
||||||
|
|
||||||
copied_crew = Crew(
|
copied_crew = Crew(**copied_data, agents=cloned_agents, tasks=cloned_tasks)
|
||||||
**copied_data,
|
|
||||||
agents=cloned_agents,
|
|
||||||
tasks=cloned_tasks,
|
|
||||||
knowledge_sources=existing_knowledge_sources,
|
|
||||||
knowledge=existing_knowledge,
|
|
||||||
)
|
|
||||||
|
|
||||||
return copied_crew
|
return copied_crew
|
||||||
|
|
||||||
@@ -1176,6 +1088,13 @@ class Crew(BaseModel):
|
|||||||
def _finish_execution(self, final_string_output: str) -> None:
|
def _finish_execution(self, final_string_output: str) -> None:
|
||||||
if self.max_rpm:
|
if self.max_rpm:
|
||||||
self._rpm_controller.stop_rpm_counter()
|
self._rpm_controller.stop_rpm_counter()
|
||||||
|
if agentops:
|
||||||
|
agentops.end_session(
|
||||||
|
end_state="Success",
|
||||||
|
end_state_reason="Finished Execution",
|
||||||
|
is_auto_end=True,
|
||||||
|
)
|
||||||
|
self._telemetry.end_crew(self, final_string_output)
|
||||||
|
|
||||||
def calculate_usage_metrics(self) -> UsageMetrics:
|
def calculate_usage_metrics(self) -> UsageMetrics:
|
||||||
"""Calculates and returns the usage metrics."""
|
"""Calculates and returns the usage metrics."""
|
||||||
@@ -1193,122 +1112,25 @@ class Crew(BaseModel):
|
|||||||
def test(
|
def test(
|
||||||
self,
|
self,
|
||||||
n_iterations: int,
|
n_iterations: int,
|
||||||
eval_llm: Union[str, InstanceOf[LLM]],
|
openai_model_name: Optional[str] = None,
|
||||||
inputs: Optional[Dict[str, Any]] = None,
|
inputs: Optional[Dict[str, Any]] = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
|
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
|
||||||
try:
|
test_crew = self.copy()
|
||||||
eval_llm = create_llm(eval_llm)
|
|
||||||
if not eval_llm:
|
|
||||||
raise ValueError("Failed to create LLM instance.")
|
|
||||||
|
|
||||||
crewai_event_bus.emit(
|
self._test_execution_span = test_crew._telemetry.test_execution_span(
|
||||||
self,
|
test_crew,
|
||||||
CrewTestStartedEvent(
|
n_iterations,
|
||||||
crew_name=self.name or "crew",
|
inputs,
|
||||||
n_iterations=n_iterations,
|
openai_model_name, # type: ignore[arg-type]
|
||||||
eval_llm=eval_llm,
|
) # type: ignore[arg-type]
|
||||||
inputs=inputs,
|
evaluator = CrewEvaluator(test_crew, openai_model_name) # type: ignore[arg-type]
|
||||||
),
|
|
||||||
)
|
|
||||||
test_crew = self.copy()
|
|
||||||
evaluator = CrewEvaluator(test_crew, eval_llm) # type: ignore[arg-type]
|
|
||||||
|
|
||||||
for i in range(1, n_iterations + 1):
|
for i in range(1, n_iterations + 1):
|
||||||
evaluator.set_iteration(i)
|
evaluator.set_iteration(i)
|
||||||
test_crew.kickoff(inputs=inputs)
|
test_crew.kickoff(inputs=inputs)
|
||||||
|
|
||||||
evaluator.print_crew_evaluation_result()
|
evaluator.print_crew_evaluation_result()
|
||||||
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
CrewTestCompletedEvent(
|
|
||||||
crew_name=self.name or "crew",
|
|
||||||
),
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
CrewTestFailedEvent(error=str(e), crew_name=self.name or "crew"),
|
|
||||||
)
|
|
||||||
raise
|
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
|
return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
|
||||||
|
|
||||||
def reset_memories(self, command_type: str) -> None:
|
|
||||||
"""Reset specific or all memories for the crew.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
command_type: Type of memory to reset.
|
|
||||||
Valid options: 'long', 'short', 'entity', 'knowledge',
|
|
||||||
'kickoff_outputs', or 'all'
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ValueError: If an invalid command type is provided.
|
|
||||||
RuntimeError: If memory reset operation fails.
|
|
||||||
"""
|
|
||||||
VALID_TYPES = frozenset(
|
|
||||||
["long", "short", "entity", "knowledge", "kickoff_outputs", "all"]
|
|
||||||
)
|
|
||||||
|
|
||||||
if command_type not in VALID_TYPES:
|
|
||||||
raise ValueError(
|
|
||||||
f"Invalid command type. Must be one of: {', '.join(sorted(VALID_TYPES))}"
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
if command_type == "all":
|
|
||||||
self._reset_all_memories()
|
|
||||||
else:
|
|
||||||
self._reset_specific_memory(command_type)
|
|
||||||
|
|
||||||
self._logger.log("info", f"{command_type} memory has been reset")
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
error_msg = f"Failed to reset {command_type} memory: {str(e)}"
|
|
||||||
self._logger.log("error", error_msg)
|
|
||||||
raise RuntimeError(error_msg) from e
|
|
||||||
|
|
||||||
def _reset_all_memories(self) -> None:
|
|
||||||
"""Reset all available memory systems."""
|
|
||||||
memory_systems = [
|
|
||||||
("short term", getattr(self, "_short_term_memory", None)),
|
|
||||||
("entity", getattr(self, "_entity_memory", None)),
|
|
||||||
("long term", getattr(self, "_long_term_memory", None)),
|
|
||||||
("task output", getattr(self, "_task_output_handler", None)),
|
|
||||||
("knowledge", getattr(self, "knowledge", None)),
|
|
||||||
]
|
|
||||||
|
|
||||||
for name, system in memory_systems:
|
|
||||||
if system is not None:
|
|
||||||
try:
|
|
||||||
system.reset()
|
|
||||||
except Exception as e:
|
|
||||||
raise RuntimeError(f"Failed to reset {name} memory") from e
|
|
||||||
|
|
||||||
def _reset_specific_memory(self, memory_type: str) -> None:
|
|
||||||
"""Reset a specific memory system.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
memory_type: Type of memory to reset
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
RuntimeError: If the specified memory system fails to reset
|
|
||||||
"""
|
|
||||||
reset_functions = {
|
|
||||||
"long": (self._long_term_memory, "long term"),
|
|
||||||
"short": (self._short_term_memory, "short term"),
|
|
||||||
"entity": (self._entity_memory, "entity"),
|
|
||||||
"knowledge": (self.knowledge, "knowledge"),
|
|
||||||
"kickoff_outputs": (self._task_output_handler, "task output"),
|
|
||||||
}
|
|
||||||
|
|
||||||
memory_system, name = reset_functions[memory_type]
|
|
||||||
if memory_system is None:
|
|
||||||
raise RuntimeError(f"{name} memory system is not initialized")
|
|
||||||
|
|
||||||
try:
|
|
||||||
memory_system.reset()
|
|
||||||
except Exception as e:
|
|
||||||
raise RuntimeError(f"Failed to reset {name} memory") from e
|
|
||||||
|
|||||||
@@ -1,5 +1,3 @@
|
|||||||
from crewai.flow.flow import Flow, start, listen, or_, and_, router
|
from crewai.flow.flow import Flow
|
||||||
from crewai.flow.persistence import persist
|
|
||||||
|
|
||||||
__all__ = ["Flow", "start", "listen", "or_", "and_", "router", "persist"]
|
|
||||||
|
|
||||||
|
__all__ = ["Flow"]
|
||||||
|
|||||||
@@ -1,7 +1,5 @@
|
|||||||
import asyncio
|
import asyncio
|
||||||
import copy
|
|
||||||
import inspect
|
import inspect
|
||||||
import logging
|
|
||||||
from typing import (
|
from typing import (
|
||||||
Any,
|
Any,
|
||||||
Callable,
|
Callable,
|
||||||
@@ -17,82 +15,25 @@ from typing import (
|
|||||||
)
|
)
|
||||||
from uuid import uuid4
|
from uuid import uuid4
|
||||||
|
|
||||||
|
from blinker import Signal
|
||||||
from pydantic import BaseModel, Field, ValidationError
|
from pydantic import BaseModel, Field, ValidationError
|
||||||
|
|
||||||
from crewai.flow.flow_visualizer import plot_flow
|
from crewai.flow.flow_events import (
|
||||||
from crewai.flow.persistence.base import FlowPersistence
|
|
||||||
from crewai.flow.utils import get_possible_return_constants
|
|
||||||
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
|
||||||
from crewai.utilities.events.flow_events import (
|
|
||||||
FlowCreatedEvent,
|
|
||||||
FlowFinishedEvent,
|
FlowFinishedEvent,
|
||||||
FlowPlotEvent,
|
|
||||||
FlowStartedEvent,
|
FlowStartedEvent,
|
||||||
MethodExecutionFailedEvent,
|
|
||||||
MethodExecutionFinishedEvent,
|
MethodExecutionFinishedEvent,
|
||||||
MethodExecutionStartedEvent,
|
MethodExecutionStartedEvent,
|
||||||
)
|
)
|
||||||
from crewai.utilities.printer import Printer
|
from crewai.flow.flow_visualizer import plot_flow
|
||||||
|
from crewai.flow.utils import get_possible_return_constants
|
||||||
logger = logging.getLogger(__name__)
|
from crewai.telemetry import Telemetry
|
||||||
|
|
||||||
|
|
||||||
class FlowState(BaseModel):
|
class FlowState(BaseModel):
|
||||||
"""Base model for all flow states, ensuring each state has a unique ID."""
|
"""Base model for all flow states, ensuring each state has a unique ID."""
|
||||||
|
id: str = Field(default_factory=lambda: str(uuid4()), description="Unique identifier for the flow state")
|
||||||
|
|
||||||
id: str = Field(
|
T = TypeVar("T", bound=Union[FlowState, Dict[str, Any]])
|
||||||
default_factory=lambda: str(uuid4()),
|
|
||||||
description="Unique identifier for the flow state",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# Type variables with explicit bounds
|
|
||||||
T = TypeVar(
|
|
||||||
"T", bound=Union[Dict[str, Any], BaseModel]
|
|
||||||
) # Generic flow state type parameter
|
|
||||||
StateT = TypeVar(
|
|
||||||
"StateT", bound=Union[Dict[str, Any], BaseModel]
|
|
||||||
) # State validation type parameter
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_state_type(state: Any, expected_type: Type[StateT]) -> StateT:
|
|
||||||
"""Ensure state matches expected type with proper validation.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
state: State instance to validate
|
|
||||||
expected_type: Expected type for the state
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Validated state instance
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
TypeError: If state doesn't match expected type
|
|
||||||
ValueError: If state validation fails
|
|
||||||
"""
|
|
||||||
"""Ensure state matches expected type with proper validation.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
state: State instance to validate
|
|
||||||
expected_type: Expected type for the state
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Validated state instance
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
TypeError: If state doesn't match expected type
|
|
||||||
ValueError: If state validation fails
|
|
||||||
"""
|
|
||||||
if expected_type is dict:
|
|
||||||
if not isinstance(state, dict):
|
|
||||||
raise TypeError(f"Expected dict, got {type(state).__name__}")
|
|
||||||
return cast(StateT, state)
|
|
||||||
if isinstance(expected_type, type) and issubclass(expected_type, BaseModel):
|
|
||||||
if not isinstance(state, expected_type):
|
|
||||||
raise TypeError(
|
|
||||||
f"Expected {expected_type.__name__}, got {type(state).__name__}"
|
|
||||||
)
|
|
||||||
return cast(StateT, state)
|
|
||||||
raise TypeError(f"Invalid expected_type: {expected_type}")
|
|
||||||
|
|
||||||
|
|
||||||
def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
|
def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
|
||||||
@@ -136,7 +77,6 @@ def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
|
|||||||
>>> def complex_start(self):
|
>>> def complex_start(self):
|
||||||
... pass
|
... pass
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def decorator(func):
|
def decorator(func):
|
||||||
func.__is_start_method__ = True
|
func.__is_start_method__ = True
|
||||||
if condition is not None:
|
if condition is not None:
|
||||||
@@ -161,7 +101,6 @@ def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
|
|||||||
|
|
||||||
return decorator
|
return decorator
|
||||||
|
|
||||||
|
|
||||||
def listen(condition: Union[str, dict, Callable]) -> Callable:
|
def listen(condition: Union[str, dict, Callable]) -> Callable:
|
||||||
"""
|
"""
|
||||||
Creates a listener that executes when specified conditions are met.
|
Creates a listener that executes when specified conditions are met.
|
||||||
@@ -198,7 +137,6 @@ def listen(condition: Union[str, dict, Callable]) -> Callable:
|
|||||||
>>> def handle_completion(self):
|
>>> def handle_completion(self):
|
||||||
... pass
|
... pass
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def decorator(func):
|
def decorator(func):
|
||||||
if isinstance(condition, str):
|
if isinstance(condition, str):
|
||||||
func.__trigger_methods__ = [condition]
|
func.__trigger_methods__ = [condition]
|
||||||
@@ -263,7 +201,6 @@ def router(condition: Union[str, dict, Callable]) -> Callable:
|
|||||||
... return CONTINUE
|
... return CONTINUE
|
||||||
... return STOP
|
... return STOP
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def decorator(func):
|
def decorator(func):
|
||||||
func.__is_router__ = True
|
func.__is_router__ = True
|
||||||
if isinstance(condition, str):
|
if isinstance(condition, str):
|
||||||
@@ -287,7 +224,6 @@ def router(condition: Union[str, dict, Callable]) -> Callable:
|
|||||||
|
|
||||||
return decorator
|
return decorator
|
||||||
|
|
||||||
|
|
||||||
def or_(*conditions: Union[str, dict, Callable]) -> dict:
|
def or_(*conditions: Union[str, dict, Callable]) -> dict:
|
||||||
"""
|
"""
|
||||||
Combines multiple conditions with OR logic for flow control.
|
Combines multiple conditions with OR logic for flow control.
|
||||||
@@ -390,31 +326,21 @@ class FlowMeta(type):
|
|||||||
routers = set()
|
routers = set()
|
||||||
|
|
||||||
for attr_name, attr_value in dct.items():
|
for attr_name, attr_value in dct.items():
|
||||||
# Check for any flow-related attributes
|
if hasattr(attr_value, "__is_start_method__"):
|
||||||
if (
|
start_methods.append(attr_name)
|
||||||
hasattr(attr_value, "__is_flow_method__")
|
|
||||||
or hasattr(attr_value, "__is_start_method__")
|
|
||||||
or hasattr(attr_value, "__trigger_methods__")
|
|
||||||
or hasattr(attr_value, "__is_router__")
|
|
||||||
):
|
|
||||||
# Register start methods
|
|
||||||
if hasattr(attr_value, "__is_start_method__"):
|
|
||||||
start_methods.append(attr_name)
|
|
||||||
|
|
||||||
# Register listeners and routers
|
|
||||||
if hasattr(attr_value, "__trigger_methods__"):
|
if hasattr(attr_value, "__trigger_methods__"):
|
||||||
methods = attr_value.__trigger_methods__
|
methods = attr_value.__trigger_methods__
|
||||||
condition_type = getattr(attr_value, "__condition_type__", "OR")
|
condition_type = getattr(attr_value, "__condition_type__", "OR")
|
||||||
listeners[attr_name] = (condition_type, methods)
|
listeners[attr_name] = (condition_type, methods)
|
||||||
|
elif hasattr(attr_value, "__trigger_methods__"):
|
||||||
if (
|
methods = attr_value.__trigger_methods__
|
||||||
hasattr(attr_value, "__is_router__")
|
condition_type = getattr(attr_value, "__condition_type__", "OR")
|
||||||
and attr_value.__is_router__
|
listeners[attr_name] = (condition_type, methods)
|
||||||
):
|
if hasattr(attr_value, "__is_router__") and attr_value.__is_router__:
|
||||||
routers.add(attr_name)
|
routers.add(attr_name)
|
||||||
possible_returns = get_possible_return_constants(attr_value)
|
possible_returns = get_possible_return_constants(attr_value)
|
||||||
if possible_returns:
|
if possible_returns:
|
||||||
router_paths[attr_name] = possible_returns
|
router_paths[attr_name] = possible_returns
|
||||||
|
|
||||||
setattr(cls, "_start_methods", start_methods)
|
setattr(cls, "_start_methods", start_methods)
|
||||||
setattr(cls, "_listeners", listeners)
|
setattr(cls, "_listeners", listeners)
|
||||||
@@ -425,17 +351,14 @@ class FlowMeta(type):
|
|||||||
|
|
||||||
|
|
||||||
class Flow(Generic[T], metaclass=FlowMeta):
|
class Flow(Generic[T], metaclass=FlowMeta):
|
||||||
"""Base class for all flows.
|
_telemetry = Telemetry()
|
||||||
|
|
||||||
Type parameter T must be either Dict[str, Any] or a subclass of BaseModel."""
|
|
||||||
|
|
||||||
_printer = Printer()
|
|
||||||
|
|
||||||
_start_methods: List[str] = []
|
_start_methods: List[str] = []
|
||||||
_listeners: Dict[str, tuple[str, List[str]]] = {}
|
_listeners: Dict[str, tuple[str, List[str]]] = {}
|
||||||
_routers: Set[str] = set()
|
_routers: Set[str] = set()
|
||||||
_router_paths: Dict[str, List[str]] = {}
|
_router_paths: Dict[str, List[str]] = {}
|
||||||
initial_state: Union[Type[T], T, None] = None
|
initial_state: Union[Type[T], T, None] = None
|
||||||
|
event_emitter = Signal("event_emitter")
|
||||||
|
|
||||||
def __class_getitem__(cls: Type["Flow"], item: Type[T]) -> Type["Flow"]:
|
def __class_getitem__(cls: Type["Flow"], item: Type[T]) -> Type["Flow"]:
|
||||||
class _FlowGeneric(cls): # type: ignore
|
class _FlowGeneric(cls): # type: ignore
|
||||||
@@ -444,139 +367,53 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
|||||||
_FlowGeneric.__name__ = f"{cls.__name__}[{item.__name__}]"
|
_FlowGeneric.__name__ = f"{cls.__name__}[{item.__name__}]"
|
||||||
return _FlowGeneric
|
return _FlowGeneric
|
||||||
|
|
||||||
def __init__(
|
def __init__(self) -> None:
|
||||||
self,
|
|
||||||
persistence: Optional[FlowPersistence] = None,
|
|
||||||
**kwargs: Any,
|
|
||||||
) -> None:
|
|
||||||
"""Initialize a new Flow instance.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
persistence: Optional persistence backend for storing flow states
|
|
||||||
**kwargs: Additional state values to initialize or override
|
|
||||||
"""
|
|
||||||
# Initialize basic instance attributes
|
|
||||||
self._methods: Dict[str, Callable] = {}
|
self._methods: Dict[str, Callable] = {}
|
||||||
|
self._state: T = self._create_initial_state()
|
||||||
self._method_execution_counts: Dict[str, int] = {}
|
self._method_execution_counts: Dict[str, int] = {}
|
||||||
self._pending_and_listeners: Dict[str, Set[str]] = {}
|
self._pending_and_listeners: Dict[str, Set[str]] = {}
|
||||||
self._method_outputs: List[Any] = [] # List to store all method outputs
|
self._method_outputs: List[Any] = [] # List to store all method outputs
|
||||||
self._persistence: Optional[FlowPersistence] = persistence
|
|
||||||
|
|
||||||
# Initialize state with initial values
|
self._telemetry.flow_creation_span(self.__class__.__name__)
|
||||||
self._state = self._create_initial_state()
|
|
||||||
|
|
||||||
# Apply any additional kwargs
|
|
||||||
if kwargs:
|
|
||||||
self._initialize_state(kwargs)
|
|
||||||
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
FlowCreatedEvent(
|
|
||||||
type="flow_created",
|
|
||||||
flow_name=self.__class__.__name__,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
# Register all flow-related methods
|
|
||||||
for method_name in dir(self):
|
for method_name in dir(self):
|
||||||
if not method_name.startswith("_"):
|
if callable(getattr(self, method_name)) and not method_name.startswith(
|
||||||
method = getattr(self, method_name)
|
"__"
|
||||||
# Check for any flow-related attributes
|
):
|
||||||
if (
|
self._methods[method_name] = getattr(self, method_name)
|
||||||
hasattr(method, "__is_flow_method__")
|
|
||||||
or hasattr(method, "__is_start_method__")
|
|
||||||
or hasattr(method, "__trigger_methods__")
|
|
||||||
or hasattr(method, "__is_router__")
|
|
||||||
):
|
|
||||||
# Ensure method is bound to this instance
|
|
||||||
if not hasattr(method, "__self__"):
|
|
||||||
method = method.__get__(self, self.__class__)
|
|
||||||
self._methods[method_name] = method
|
|
||||||
|
|
||||||
def _create_initial_state(self) -> T:
|
def _create_initial_state(self) -> T:
|
||||||
"""Create and initialize flow state with UUID and default values.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
New state instance with UUID and default values initialized
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ValueError: If structured state model lacks 'id' field
|
|
||||||
TypeError: If state is neither BaseModel nor dictionary
|
|
||||||
"""
|
|
||||||
# Handle case where initial_state is None but we have a type parameter
|
# Handle case where initial_state is None but we have a type parameter
|
||||||
if self.initial_state is None and hasattr(self, "_initial_state_T"):
|
if self.initial_state is None and hasattr(self, "_initial_state_T"):
|
||||||
state_type = getattr(self, "_initial_state_T")
|
state_type = getattr(self, "_initial_state_T")
|
||||||
if isinstance(state_type, type):
|
if isinstance(state_type, type):
|
||||||
if issubclass(state_type, FlowState):
|
if issubclass(state_type, FlowState):
|
||||||
# Create instance without id, then set it
|
return state_type() # type: ignore
|
||||||
instance = state_type()
|
|
||||||
if not hasattr(instance, "id"):
|
|
||||||
setattr(instance, "id", str(uuid4()))
|
|
||||||
return cast(T, instance)
|
|
||||||
elif issubclass(state_type, BaseModel):
|
elif issubclass(state_type, BaseModel):
|
||||||
# Create a new type that includes the ID field
|
# Create a new type that includes the ID field
|
||||||
class StateWithId(state_type, FlowState): # type: ignore
|
class StateWithId(state_type, FlowState): # type: ignore
|
||||||
pass
|
pass
|
||||||
|
return StateWithId() # type: ignore
|
||||||
instance = StateWithId()
|
|
||||||
if not hasattr(instance, "id"):
|
|
||||||
setattr(instance, "id", str(uuid4()))
|
|
||||||
return cast(T, instance)
|
|
||||||
elif state_type is dict:
|
|
||||||
return cast(T, {"id": str(uuid4())})
|
|
||||||
|
|
||||||
# Handle case where no initial state is provided
|
# Handle case where no initial state is provided
|
||||||
if self.initial_state is None:
|
if self.initial_state is None:
|
||||||
return cast(T, {"id": str(uuid4())})
|
return {"id": str(uuid4())} # type: ignore
|
||||||
|
|
||||||
# Handle case where initial_state is a type (class)
|
# Handle case where initial_state is a type (class)
|
||||||
if isinstance(self.initial_state, type):
|
if isinstance(self.initial_state, type):
|
||||||
if issubclass(self.initial_state, FlowState):
|
if issubclass(self.initial_state, FlowState):
|
||||||
return cast(T, self.initial_state()) # Uses model defaults
|
return self.initial_state() # type: ignore
|
||||||
elif issubclass(self.initial_state, BaseModel):
|
elif issubclass(self.initial_state, BaseModel):
|
||||||
# Validate that the model has an id field
|
# Create a new type that includes the ID field
|
||||||
model_fields = getattr(self.initial_state, "model_fields", None)
|
class StateWithId(self.initial_state, FlowState): # type: ignore
|
||||||
if not model_fields or "id" not in model_fields:
|
pass
|
||||||
raise ValueError("Flow state model must have an 'id' field")
|
return StateWithId() # type: ignore
|
||||||
return cast(T, self.initial_state()) # Uses model defaults
|
|
||||||
elif self.initial_state is dict:
|
|
||||||
return cast(T, {"id": str(uuid4())})
|
|
||||||
|
|
||||||
# Handle dictionary instance case
|
# Handle dictionary case
|
||||||
if isinstance(self.initial_state, dict):
|
if isinstance(self.initial_state, dict) and "id" not in self.initial_state:
|
||||||
new_state = dict(self.initial_state) # Copy to avoid mutations
|
self.initial_state["id"] = str(uuid4())
|
||||||
if "id" not in new_state:
|
|
||||||
new_state["id"] = str(uuid4())
|
|
||||||
return cast(T, new_state)
|
|
||||||
|
|
||||||
# Handle BaseModel instance case
|
return self.initial_state # type: ignore
|
||||||
if isinstance(self.initial_state, BaseModel):
|
|
||||||
model = cast(BaseModel, self.initial_state)
|
|
||||||
if not hasattr(model, "id"):
|
|
||||||
raise ValueError("Flow state model must have an 'id' field")
|
|
||||||
|
|
||||||
# Create new instance with same values to avoid mutations
|
|
||||||
if hasattr(model, "model_dump"):
|
|
||||||
# Pydantic v2
|
|
||||||
state_dict = model.model_dump()
|
|
||||||
elif hasattr(model, "dict"):
|
|
||||||
# Pydantic v1
|
|
||||||
state_dict = model.dict()
|
|
||||||
else:
|
|
||||||
# Fallback for other BaseModel implementations
|
|
||||||
state_dict = {
|
|
||||||
k: v for k, v in model.__dict__.items() if not k.startswith("_")
|
|
||||||
}
|
|
||||||
|
|
||||||
# Create new instance of the same class
|
|
||||||
model_class = type(model)
|
|
||||||
return cast(T, model_class(**state_dict))
|
|
||||||
raise TypeError(
|
|
||||||
f"Initial state must be dict or BaseModel, got {type(self.initial_state)}"
|
|
||||||
)
|
|
||||||
|
|
||||||
def _copy_state(self) -> T:
|
|
||||||
return copy.deepcopy(self._state)
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def state(self) -> T:
|
def state(self) -> T:
|
||||||
@@ -587,198 +424,72 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
|||||||
"""Returns the list of all outputs from executed methods."""
|
"""Returns the list of all outputs from executed methods."""
|
||||||
return self._method_outputs
|
return self._method_outputs
|
||||||
|
|
||||||
@property
|
|
||||||
def flow_id(self) -> str:
|
|
||||||
"""Returns the unique identifier of this flow instance.
|
|
||||||
|
|
||||||
This property provides a consistent way to access the flow's unique identifier
|
|
||||||
regardless of the underlying state implementation (dict or BaseModel).
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
str: The flow's unique identifier, or an empty string if not found
|
|
||||||
|
|
||||||
Note:
|
|
||||||
This property safely handles both dictionary and BaseModel state types,
|
|
||||||
returning an empty string if the ID cannot be retrieved rather than raising
|
|
||||||
an exception.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
```python
|
|
||||||
flow = MyFlow()
|
|
||||||
print(f"Current flow ID: {flow.flow_id}") # Safely get flow ID
|
|
||||||
```
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
if not hasattr(self, "_state"):
|
|
||||||
return ""
|
|
||||||
|
|
||||||
if isinstance(self._state, dict):
|
|
||||||
return str(self._state.get("id", ""))
|
|
||||||
elif isinstance(self._state, BaseModel):
|
|
||||||
return str(getattr(self._state, "id", ""))
|
|
||||||
return ""
|
|
||||||
except (AttributeError, TypeError):
|
|
||||||
return "" # Safely handle any unexpected attribute access issues
|
|
||||||
|
|
||||||
def _initialize_state(self, inputs: Dict[str, Any]) -> None:
|
def _initialize_state(self, inputs: Dict[str, Any]) -> None:
|
||||||
"""Initialize or update flow state with new inputs.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
inputs: Dictionary of state values to set/update
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ValueError: If validation fails for structured state
|
|
||||||
TypeError: If state is neither BaseModel nor dictionary
|
|
||||||
"""
|
|
||||||
if isinstance(self._state, dict):
|
if isinstance(self._state, dict):
|
||||||
# For dict states, preserve existing fields unless overridden
|
# Preserve the ID when updating unstructured state
|
||||||
current_id = self._state.get("id")
|
current_id = self._state.get("id")
|
||||||
# Only update specified fields
|
self._state.update(inputs)
|
||||||
for k, v in inputs.items():
|
|
||||||
self._state[k] = v
|
|
||||||
# Ensure ID is preserved or generated
|
|
||||||
if current_id:
|
if current_id:
|
||||||
self._state["id"] = current_id
|
self._state["id"] = current_id
|
||||||
elif "id" not in self._state:
|
elif "id" not in self._state:
|
||||||
self._state["id"] = str(uuid4())
|
self._state["id"] = str(uuid4())
|
||||||
elif isinstance(self._state, BaseModel):
|
elif isinstance(self._state, BaseModel):
|
||||||
# For BaseModel states, preserve existing fields unless overridden
|
# Structured state
|
||||||
try:
|
try:
|
||||||
model = cast(BaseModel, self._state)
|
def create_model_with_extra_forbid(
|
||||||
# Get current state as dict
|
base_model: Type[BaseModel],
|
||||||
if hasattr(model, "model_dump"):
|
) -> Type[BaseModel]:
|
||||||
current_state = model.model_dump()
|
class ModelWithExtraForbid(base_model): # type: ignore
|
||||||
elif hasattr(model, "dict"):
|
model_config = base_model.model_config.copy()
|
||||||
current_state = model.dict()
|
model_config["extra"] = "forbid"
|
||||||
else:
|
|
||||||
current_state = {
|
return ModelWithExtraForbid
|
||||||
k: v for k, v in model.__dict__.items() if not k.startswith("_")
|
|
||||||
|
# Get current state as dict, preserving the ID if it exists
|
||||||
|
state_model = cast(BaseModel, self._state)
|
||||||
|
current_state = (
|
||||||
|
state_model.model_dump()
|
||||||
|
if hasattr(state_model, "model_dump")
|
||||||
|
else state_model.dict()
|
||||||
|
if hasattr(state_model, "dict")
|
||||||
|
else {
|
||||||
|
k: v
|
||||||
|
for k, v in state_model.__dict__.items()
|
||||||
|
if not k.startswith("_")
|
||||||
}
|
}
|
||||||
|
)
|
||||||
|
|
||||||
# Create new state with preserved fields and updates
|
ModelWithExtraForbid = create_model_with_extra_forbid(
|
||||||
new_state = {**current_state, **inputs}
|
self._state.__class__
|
||||||
|
)
|
||||||
# Create new instance with merged state
|
self._state = cast(
|
||||||
model_class = type(model)
|
T, ModelWithExtraForbid(**{**current_state, **inputs})
|
||||||
if hasattr(model_class, "model_validate"):
|
)
|
||||||
# Pydantic v2
|
|
||||||
self._state = cast(T, model_class.model_validate(new_state))
|
|
||||||
elif hasattr(model_class, "parse_obj"):
|
|
||||||
# Pydantic v1
|
|
||||||
self._state = cast(T, model_class.parse_obj(new_state))
|
|
||||||
else:
|
|
||||||
# Fallback for other BaseModel implementations
|
|
||||||
self._state = cast(T, model_class(**new_state))
|
|
||||||
except ValidationError as e:
|
except ValidationError as e:
|
||||||
raise ValueError(f"Invalid inputs for structured state: {e}") from e
|
raise ValueError(f"Invalid inputs for structured state: {e}") from e
|
||||||
else:
|
else:
|
||||||
raise TypeError("State must be a BaseModel instance or a dictionary.")
|
raise TypeError("State must be a BaseModel instance or a dictionary.")
|
||||||
|
|
||||||
def _restore_state(self, stored_state: Dict[str, Any]) -> None:
|
|
||||||
"""Restore flow state from persistence.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
stored_state: Previously stored state to restore
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ValueError: If validation fails for structured state
|
|
||||||
TypeError: If state is neither BaseModel nor dictionary
|
|
||||||
"""
|
|
||||||
# When restoring from persistence, use the stored ID
|
|
||||||
stored_id = stored_state.get("id")
|
|
||||||
if not stored_id:
|
|
||||||
raise ValueError("Stored state must have an 'id' field")
|
|
||||||
|
|
||||||
if isinstance(self._state, dict):
|
|
||||||
# For dict states, update all fields from stored state
|
|
||||||
self._state.clear()
|
|
||||||
self._state.update(stored_state)
|
|
||||||
elif isinstance(self._state, BaseModel):
|
|
||||||
# For BaseModel states, create new instance with stored values
|
|
||||||
model = cast(BaseModel, self._state)
|
|
||||||
if hasattr(model, "model_validate"):
|
|
||||||
# Pydantic v2
|
|
||||||
self._state = cast(T, type(model).model_validate(stored_state))
|
|
||||||
elif hasattr(model, "parse_obj"):
|
|
||||||
# Pydantic v1
|
|
||||||
self._state = cast(T, type(model).parse_obj(stored_state))
|
|
||||||
else:
|
|
||||||
# Fallback for other BaseModel implementations
|
|
||||||
self._state = cast(T, type(model)(**stored_state))
|
|
||||||
else:
|
|
||||||
raise TypeError(f"State must be dict or BaseModel, got {type(self._state)}")
|
|
||||||
|
|
||||||
def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||||
"""
|
self.event_emitter.send(
|
||||||
Start the flow execution in a synchronous context.
|
|
||||||
|
|
||||||
This method wraps kickoff_async so that all state initialization and event
|
|
||||||
emission is handled in the asynchronous method.
|
|
||||||
"""
|
|
||||||
|
|
||||||
async def run_flow():
|
|
||||||
return await self.kickoff_async(inputs)
|
|
||||||
|
|
||||||
return asyncio.run(run_flow())
|
|
||||||
|
|
||||||
async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
|
||||||
"""
|
|
||||||
Start the flow execution asynchronously.
|
|
||||||
|
|
||||||
This method performs state restoration (if an 'id' is provided and persistence is available)
|
|
||||||
and updates the flow state with any additional inputs. It then emits the FlowStartedEvent,
|
|
||||||
logs the flow startup, and executes all start methods. Once completed, it emits the
|
|
||||||
FlowFinishedEvent and returns the final output.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
inputs: Optional dictionary containing input values and/or a state ID for restoration.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The final output from the flow, which is the result of the last executed method.
|
|
||||||
"""
|
|
||||||
if inputs:
|
|
||||||
# Override the id in the state if it exists in inputs
|
|
||||||
if "id" in inputs:
|
|
||||||
if isinstance(self._state, dict):
|
|
||||||
self._state["id"] = inputs["id"]
|
|
||||||
elif isinstance(self._state, BaseModel):
|
|
||||||
setattr(self._state, "id", inputs["id"])
|
|
||||||
|
|
||||||
# If persistence is enabled, attempt to restore the stored state using the provided id.
|
|
||||||
if "id" in inputs and self._persistence is not None:
|
|
||||||
restore_uuid = inputs["id"]
|
|
||||||
stored_state = self._persistence.load_state(restore_uuid)
|
|
||||||
if stored_state:
|
|
||||||
self._log_flow_event(
|
|
||||||
f"Loading flow state from memory for UUID: {restore_uuid}",
|
|
||||||
color="yellow",
|
|
||||||
)
|
|
||||||
self._restore_state(stored_state)
|
|
||||||
else:
|
|
||||||
self._log_flow_event(
|
|
||||||
f"No flow state found for UUID: {restore_uuid}", color="red"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Update state with any additional inputs (ignoring the 'id' key)
|
|
||||||
filtered_inputs = {k: v for k, v in inputs.items() if k != "id"}
|
|
||||||
if filtered_inputs:
|
|
||||||
self._initialize_state(filtered_inputs)
|
|
||||||
|
|
||||||
# Emit FlowStartedEvent and log the start of the flow.
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
self,
|
||||||
FlowStartedEvent(
|
event=FlowStartedEvent(
|
||||||
type="flow_started",
|
type="flow_started",
|
||||||
flow_name=self.__class__.__name__,
|
flow_name=self.__class__.__name__,
|
||||||
inputs=inputs,
|
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
self._log_flow_event(
|
|
||||||
f"Flow started with ID: {self.flow_id}", color="bold_magenta"
|
|
||||||
)
|
|
||||||
|
|
||||||
if inputs is not None and "id" not in inputs:
|
if inputs is not None:
|
||||||
self._initialize_state(inputs)
|
self._initialize_state(inputs)
|
||||||
|
return asyncio.run(self.kickoff_async())
|
||||||
|
|
||||||
|
async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||||
|
if not self._start_methods:
|
||||||
|
raise ValueError("No start method defined")
|
||||||
|
|
||||||
|
self._telemetry.flow_execution_span(
|
||||||
|
self.__class__.__name__, list(self._methods.keys())
|
||||||
|
)
|
||||||
|
|
||||||
tasks = [
|
tasks = [
|
||||||
self._execute_start_method(start_method)
|
self._execute_start_method(start_method)
|
||||||
@@ -788,15 +499,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
|||||||
|
|
||||||
final_output = self._method_outputs[-1] if self._method_outputs else None
|
final_output = self._method_outputs[-1] if self._method_outputs else None
|
||||||
|
|
||||||
crewai_event_bus.emit(
|
self.event_emitter.send(
|
||||||
self,
|
self,
|
||||||
FlowFinishedEvent(
|
event=FlowFinishedEvent(
|
||||||
type="flow_finished",
|
type="flow_finished",
|
||||||
flow_name=self.__class__.__name__,
|
flow_name=self.__class__.__name__,
|
||||||
result=final_output,
|
result=final_output,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
return final_output
|
return final_output
|
||||||
|
|
||||||
async def _execute_start_method(self, start_method_name: str) -> None:
|
async def _execute_start_method(self, start_method_name: str) -> None:
|
||||||
@@ -825,55 +535,16 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
|||||||
async def _execute_method(
|
async def _execute_method(
|
||||||
self, method_name: str, method: Callable, *args: Any, **kwargs: Any
|
self, method_name: str, method: Callable, *args: Any, **kwargs: Any
|
||||||
) -> Any:
|
) -> Any:
|
||||||
try:
|
result = (
|
||||||
dumped_params = {f"_{i}": arg for i, arg in enumerate(args)} | (
|
await method(*args, **kwargs)
|
||||||
kwargs or {}
|
if asyncio.iscoroutinefunction(method)
|
||||||
)
|
else method(*args, **kwargs)
|
||||||
crewai_event_bus.emit(
|
)
|
||||||
self,
|
self._method_outputs.append(result)
|
||||||
MethodExecutionStartedEvent(
|
self._method_execution_counts[method_name] = (
|
||||||
type="method_execution_started",
|
self._method_execution_counts.get(method_name, 0) + 1
|
||||||
method_name=method_name,
|
)
|
||||||
flow_name=self.__class__.__name__,
|
return result
|
||||||
params=dumped_params,
|
|
||||||
state=self._copy_state(),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
result = (
|
|
||||||
await method(*args, **kwargs)
|
|
||||||
if asyncio.iscoroutinefunction(method)
|
|
||||||
else method(*args, **kwargs)
|
|
||||||
)
|
|
||||||
|
|
||||||
self._method_outputs.append(result)
|
|
||||||
self._method_execution_counts[method_name] = (
|
|
||||||
self._method_execution_counts.get(method_name, 0) + 1
|
|
||||||
)
|
|
||||||
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
MethodExecutionFinishedEvent(
|
|
||||||
type="method_execution_finished",
|
|
||||||
method_name=method_name,
|
|
||||||
flow_name=self.__class__.__name__,
|
|
||||||
state=self._copy_state(),
|
|
||||||
result=result,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
return result
|
|
||||||
except Exception as e:
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
MethodExecutionFailedEvent(
|
|
||||||
type="method_execution_failed",
|
|
||||||
method_name=method_name,
|
|
||||||
flow_name=self.__class__.__name__,
|
|
||||||
error=e,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
raise e
|
|
||||||
|
|
||||||
async def _execute_listeners(self, trigger_method: str, result: Any) -> None:
|
async def _execute_listeners(self, trigger_method: str, result: Any) -> None:
|
||||||
"""
|
"""
|
||||||
@@ -1012,6 +683,15 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
|||||||
try:
|
try:
|
||||||
method = self._methods[listener_name]
|
method = self._methods[listener_name]
|
||||||
|
|
||||||
|
self.event_emitter.send(
|
||||||
|
self,
|
||||||
|
event=MethodExecutionStartedEvent(
|
||||||
|
type="method_execution_started",
|
||||||
|
method_name=listener_name,
|
||||||
|
flow_name=self.__class__.__name__,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
sig = inspect.signature(method)
|
sig = inspect.signature(method)
|
||||||
params = list(sig.parameters.values())
|
params = list(sig.parameters.values())
|
||||||
method_params = [p for p in params if p.name != "self"]
|
method_params = [p for p in params if p.name != "self"]
|
||||||
@@ -1023,6 +703,15 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
|||||||
else:
|
else:
|
||||||
listener_result = await self._execute_method(listener_name, method)
|
listener_result = await self._execute_method(listener_name, method)
|
||||||
|
|
||||||
|
self.event_emitter.send(
|
||||||
|
self,
|
||||||
|
event=MethodExecutionFinishedEvent(
|
||||||
|
type="method_execution_finished",
|
||||||
|
method_name=listener_name,
|
||||||
|
flow_name=self.__class__.__name__,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
# Execute listeners (and possibly routers) of this listener
|
# Execute listeners (and possibly routers) of this listener
|
||||||
await self._execute_listeners(listener_name, listener_result)
|
await self._execute_listeners(listener_name, listener_result)
|
||||||
|
|
||||||
@@ -1034,38 +723,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
|||||||
|
|
||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
|
|
||||||
def _log_flow_event(
|
|
||||||
self, message: str, color: str = "yellow", level: str = "info"
|
|
||||||
) -> None:
|
|
||||||
"""Centralized logging method for flow events.
|
|
||||||
|
|
||||||
This method provides a consistent interface for logging flow-related events,
|
|
||||||
combining both console output with colors and proper logging levels.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
message: The message to log
|
|
||||||
color: Color to use for console output (default: yellow)
|
|
||||||
Available colors: purple, red, bold_green, bold_purple,
|
|
||||||
bold_blue, yellow, yellow
|
|
||||||
level: Log level to use (default: info)
|
|
||||||
Supported levels: info, warning
|
|
||||||
|
|
||||||
Note:
|
|
||||||
This method uses the Printer utility for colored console output
|
|
||||||
and the standard logging module for log level support.
|
|
||||||
"""
|
|
||||||
self._printer.print(message, color=color)
|
|
||||||
if level == "info":
|
|
||||||
logger.info(message)
|
|
||||||
elif level == "warning":
|
|
||||||
logger.warning(message)
|
|
||||||
|
|
||||||
def plot(self, filename: str = "crewai_flow") -> None:
|
def plot(self, filename: str = "crewai_flow") -> None:
|
||||||
crewai_event_bus.emit(
|
self._telemetry.flow_plotting_span(
|
||||||
self,
|
self.__class__.__name__, list(self._methods.keys())
|
||||||
FlowPlotEvent(
|
|
||||||
type="flow_plot",
|
|
||||||
flow_name=self.__class__.__name__,
|
|
||||||
),
|
|
||||||
)
|
)
|
||||||
plot_flow(self, filename)
|
plot_flow(self, filename)
|
||||||
|
|||||||
33
src/crewai/flow/flow_events.py
Normal file
33
src/crewai/flow/flow_events.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
from dataclasses import dataclass, field
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import Any, Optional
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Event:
|
||||||
|
type: str
|
||||||
|
flow_name: str
|
||||||
|
timestamp: datetime = field(init=False)
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
self.timestamp = datetime.now()
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class FlowStartedEvent(Event):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class MethodExecutionStartedEvent(Event):
|
||||||
|
method_name: str
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class MethodExecutionFinishedEvent(Event):
|
||||||
|
method_name: str
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class FlowFinishedEvent(Event):
|
||||||
|
result: Optional[Any] = None
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
"""
|
|
||||||
CrewAI Flow Persistence.
|
|
||||||
|
|
||||||
This module provides interfaces and implementations for persisting flow states.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Any, Dict, TypeVar, Union
|
|
||||||
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
from crewai.flow.persistence.base import FlowPersistence
|
|
||||||
from crewai.flow.persistence.decorators import persist
|
|
||||||
from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
|
|
||||||
|
|
||||||
__all__ = ["FlowPersistence", "persist", "SQLiteFlowPersistence"]
|
|
||||||
|
|
||||||
StateType = TypeVar('StateType', bound=Union[Dict[str, Any], BaseModel])
|
|
||||||
DictStateType = Dict[str, Any]
|
|
||||||
@@ -1,53 +0,0 @@
|
|||||||
"""Base class for flow state persistence."""
|
|
||||||
|
|
||||||
import abc
|
|
||||||
from typing import Any, Dict, Optional, Union
|
|
||||||
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
|
|
||||||
class FlowPersistence(abc.ABC):
|
|
||||||
"""Abstract base class for flow state persistence.
|
|
||||||
|
|
||||||
This class defines the interface that all persistence implementations must follow.
|
|
||||||
It supports both structured (Pydantic BaseModel) and unstructured (dict) states.
|
|
||||||
"""
|
|
||||||
|
|
||||||
@abc.abstractmethod
|
|
||||||
def init_db(self) -> None:
|
|
||||||
"""Initialize the persistence backend.
|
|
||||||
|
|
||||||
This method should handle any necessary setup, such as:
|
|
||||||
- Creating tables
|
|
||||||
- Establishing connections
|
|
||||||
- Setting up indexes
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
@abc.abstractmethod
|
|
||||||
def save_state(
|
|
||||||
self,
|
|
||||||
flow_uuid: str,
|
|
||||||
method_name: str,
|
|
||||||
state_data: Union[Dict[str, Any], BaseModel]
|
|
||||||
) -> None:
|
|
||||||
"""Persist the flow state after method completion.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
flow_uuid: Unique identifier for the flow instance
|
|
||||||
method_name: Name of the method that just completed
|
|
||||||
state_data: Current state data (either dict or Pydantic model)
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
@abc.abstractmethod
|
|
||||||
def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
|
|
||||||
"""Load the most recent state for a given flow UUID.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
flow_uuid: Unique identifier for the flow instance
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The most recent state as a dictionary, or None if no state exists
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
@@ -1,254 +0,0 @@
|
|||||||
"""
|
|
||||||
Decorators for flow state persistence.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
```python
|
|
||||||
from crewai.flow.flow import Flow, start
|
|
||||||
from crewai.flow.persistence import persist, SQLiteFlowPersistence
|
|
||||||
|
|
||||||
class MyFlow(Flow):
|
|
||||||
@start()
|
|
||||||
@persist(SQLiteFlowPersistence())
|
|
||||||
def sync_method(self):
|
|
||||||
# Synchronous method implementation
|
|
||||||
pass
|
|
||||||
|
|
||||||
@start()
|
|
||||||
@persist(SQLiteFlowPersistence())
|
|
||||||
async def async_method(self):
|
|
||||||
# Asynchronous method implementation
|
|
||||||
await some_async_operation()
|
|
||||||
```
|
|
||||||
"""
|
|
||||||
|
|
||||||
import asyncio
|
|
||||||
import functools
|
|
||||||
import logging
|
|
||||||
from typing import (
|
|
||||||
Any,
|
|
||||||
Callable,
|
|
||||||
Optional,
|
|
||||||
Type,
|
|
||||||
TypeVar,
|
|
||||||
Union,
|
|
||||||
cast,
|
|
||||||
)
|
|
||||||
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
from crewai.flow.persistence.base import FlowPersistence
|
|
||||||
from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
|
|
||||||
from crewai.utilities.printer import Printer
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
T = TypeVar("T")
|
|
||||||
|
|
||||||
# Constants for log messages
|
|
||||||
LOG_MESSAGES = {
|
|
||||||
"save_state": "Saving flow state to memory for ID: {}",
|
|
||||||
"save_error": "Failed to persist state for method {}: {}",
|
|
||||||
"state_missing": "Flow instance has no state",
|
|
||||||
"id_missing": "Flow state must have an 'id' field for persistence"
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class PersistenceDecorator:
|
|
||||||
"""Class to handle flow state persistence with consistent logging."""
|
|
||||||
|
|
||||||
_printer = Printer() # Class-level printer instance
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def persist_state(cls, flow_instance: Any, method_name: str, persistence_instance: FlowPersistence, verbose: bool = False) -> None:
|
|
||||||
"""Persist flow state with proper error handling and logging.
|
|
||||||
|
|
||||||
This method handles the persistence of flow state data, including proper
|
|
||||||
error handling and colored console output for status updates.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
flow_instance: The flow instance whose state to persist
|
|
||||||
method_name: Name of the method that triggered persistence
|
|
||||||
persistence_instance: The persistence backend to use
|
|
||||||
verbose: Whether to log persistence operations
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ValueError: If flow has no state or state lacks an ID
|
|
||||||
RuntimeError: If state persistence fails
|
|
||||||
AttributeError: If flow instance lacks required state attributes
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
state = getattr(flow_instance, 'state', None)
|
|
||||||
if state is None:
|
|
||||||
raise ValueError("Flow instance has no state")
|
|
||||||
|
|
||||||
flow_uuid: Optional[str] = None
|
|
||||||
if isinstance(state, dict):
|
|
||||||
flow_uuid = state.get('id')
|
|
||||||
elif isinstance(state, BaseModel):
|
|
||||||
flow_uuid = getattr(state, 'id', None)
|
|
||||||
|
|
||||||
if not flow_uuid:
|
|
||||||
raise ValueError("Flow state must have an 'id' field for persistence")
|
|
||||||
|
|
||||||
# Log state saving only if verbose is True
|
|
||||||
if verbose:
|
|
||||||
cls._printer.print(LOG_MESSAGES["save_state"].format(flow_uuid), color="cyan")
|
|
||||||
logger.info(LOG_MESSAGES["save_state"].format(flow_uuid))
|
|
||||||
|
|
||||||
try:
|
|
||||||
persistence_instance.save_state(
|
|
||||||
flow_uuid=flow_uuid,
|
|
||||||
method_name=method_name,
|
|
||||||
state_data=state,
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
error_msg = LOG_MESSAGES["save_error"].format(method_name, str(e))
|
|
||||||
cls._printer.print(error_msg, color="red")
|
|
||||||
logger.error(error_msg)
|
|
||||||
raise RuntimeError(f"State persistence failed: {str(e)}") from e
|
|
||||||
except AttributeError:
|
|
||||||
error_msg = LOG_MESSAGES["state_missing"]
|
|
||||||
cls._printer.print(error_msg, color="red")
|
|
||||||
logger.error(error_msg)
|
|
||||||
raise ValueError(error_msg)
|
|
||||||
except (TypeError, ValueError) as e:
|
|
||||||
error_msg = LOG_MESSAGES["id_missing"]
|
|
||||||
cls._printer.print(error_msg, color="red")
|
|
||||||
logger.error(error_msg)
|
|
||||||
raise ValueError(error_msg) from e
|
|
||||||
|
|
||||||
|
|
||||||
def persist(persistence: Optional[FlowPersistence] = None, verbose: bool = False):
|
|
||||||
"""Decorator to persist flow state.
|
|
||||||
|
|
||||||
This decorator can be applied at either the class level or method level.
|
|
||||||
When applied at the class level, it automatically persists all flow method
|
|
||||||
states. When applied at the method level, it persists only that method's
|
|
||||||
state.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
persistence: Optional FlowPersistence implementation to use.
|
|
||||||
If not provided, uses SQLiteFlowPersistence.
|
|
||||||
verbose: Whether to log persistence operations. Defaults to False.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
A decorator that can be applied to either a class or method
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ValueError: If the flow state doesn't have an 'id' field
|
|
||||||
RuntimeError: If state persistence fails
|
|
||||||
|
|
||||||
Example:
|
|
||||||
@persist(verbose=True) # Class-level persistence with logging
|
|
||||||
class MyFlow(Flow[MyState]):
|
|
||||||
@start()
|
|
||||||
def begin(self):
|
|
||||||
pass
|
|
||||||
"""
|
|
||||||
def decorator(target: Union[Type, Callable[..., T]]) -> Union[Type, Callable[..., T]]:
|
|
||||||
"""Decorator that handles both class and method decoration."""
|
|
||||||
actual_persistence = persistence or SQLiteFlowPersistence()
|
|
||||||
|
|
||||||
if isinstance(target, type):
|
|
||||||
# Class decoration
|
|
||||||
original_init = getattr(target, "__init__")
|
|
||||||
|
|
||||||
@functools.wraps(original_init)
|
|
||||||
def new_init(self: Any, *args: Any, **kwargs: Any) -> None:
|
|
||||||
if 'persistence' not in kwargs:
|
|
||||||
kwargs['persistence'] = actual_persistence
|
|
||||||
original_init(self, *args, **kwargs)
|
|
||||||
|
|
||||||
setattr(target, "__init__", new_init)
|
|
||||||
|
|
||||||
# Store original methods to preserve their decorators
|
|
||||||
original_methods = {}
|
|
||||||
|
|
||||||
for name, method in target.__dict__.items():
|
|
||||||
if callable(method) and (
|
|
||||||
hasattr(method, "__is_start_method__") or
|
|
||||||
hasattr(method, "__trigger_methods__") or
|
|
||||||
hasattr(method, "__condition_type__") or
|
|
||||||
hasattr(method, "__is_flow_method__") or
|
|
||||||
hasattr(method, "__is_router__")
|
|
||||||
):
|
|
||||||
original_methods[name] = method
|
|
||||||
|
|
||||||
# Create wrapped versions of the methods that include persistence
|
|
||||||
for name, method in original_methods.items():
|
|
||||||
if asyncio.iscoroutinefunction(method):
|
|
||||||
# Create a closure to capture the current name and method
|
|
||||||
def create_async_wrapper(method_name: str, original_method: Callable):
|
|
||||||
@functools.wraps(original_method)
|
|
||||||
async def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
|
|
||||||
result = await original_method(self, *args, **kwargs)
|
|
||||||
PersistenceDecorator.persist_state(self, method_name, actual_persistence, verbose)
|
|
||||||
return result
|
|
||||||
return method_wrapper
|
|
||||||
|
|
||||||
wrapped = create_async_wrapper(name, method)
|
|
||||||
|
|
||||||
# Preserve all original decorators and attributes
|
|
||||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
|
||||||
if hasattr(method, attr):
|
|
||||||
setattr(wrapped, attr, getattr(method, attr))
|
|
||||||
setattr(wrapped, "__is_flow_method__", True)
|
|
||||||
|
|
||||||
# Update the class with the wrapped method
|
|
||||||
setattr(target, name, wrapped)
|
|
||||||
else:
|
|
||||||
# Create a closure to capture the current name and method
|
|
||||||
def create_sync_wrapper(method_name: str, original_method: Callable):
|
|
||||||
@functools.wraps(original_method)
|
|
||||||
def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
|
|
||||||
result = original_method(self, *args, **kwargs)
|
|
||||||
PersistenceDecorator.persist_state(self, method_name, actual_persistence, verbose)
|
|
||||||
return result
|
|
||||||
return method_wrapper
|
|
||||||
|
|
||||||
wrapped = create_sync_wrapper(name, method)
|
|
||||||
|
|
||||||
# Preserve all original decorators and attributes
|
|
||||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
|
||||||
if hasattr(method, attr):
|
|
||||||
setattr(wrapped, attr, getattr(method, attr))
|
|
||||||
setattr(wrapped, "__is_flow_method__", True)
|
|
||||||
|
|
||||||
# Update the class with the wrapped method
|
|
||||||
setattr(target, name, wrapped)
|
|
||||||
|
|
||||||
return target
|
|
||||||
else:
|
|
||||||
# Method decoration
|
|
||||||
method = target
|
|
||||||
setattr(method, "__is_flow_method__", True)
|
|
||||||
|
|
||||||
if asyncio.iscoroutinefunction(method):
|
|
||||||
@functools.wraps(method)
|
|
||||||
async def method_async_wrapper(flow_instance: Any, *args: Any, **kwargs: Any) -> T:
|
|
||||||
method_coro = method(flow_instance, *args, **kwargs)
|
|
||||||
if asyncio.iscoroutine(method_coro):
|
|
||||||
result = await method_coro
|
|
||||||
else:
|
|
||||||
result = method_coro
|
|
||||||
PersistenceDecorator.persist_state(flow_instance, method.__name__, actual_persistence, verbose)
|
|
||||||
return result
|
|
||||||
|
|
||||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
|
||||||
if hasattr(method, attr):
|
|
||||||
setattr(method_async_wrapper, attr, getattr(method, attr))
|
|
||||||
setattr(method_async_wrapper, "__is_flow_method__", True)
|
|
||||||
return cast(Callable[..., T], method_async_wrapper)
|
|
||||||
else:
|
|
||||||
@functools.wraps(method)
|
|
||||||
def method_sync_wrapper(flow_instance: Any, *args: Any, **kwargs: Any) -> T:
|
|
||||||
result = method(flow_instance, *args, **kwargs)
|
|
||||||
PersistenceDecorator.persist_state(flow_instance, method.__name__, actual_persistence, verbose)
|
|
||||||
return result
|
|
||||||
|
|
||||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
|
||||||
if hasattr(method, attr):
|
|
||||||
setattr(method_sync_wrapper, attr, getattr(method, attr))
|
|
||||||
setattr(method_sync_wrapper, "__is_flow_method__", True)
|
|
||||||
return cast(Callable[..., T], method_sync_wrapper)
|
|
||||||
|
|
||||||
return decorator
|
|
||||||
@@ -1,134 +0,0 @@
|
|||||||
"""
|
|
||||||
SQLite-based implementation of flow state persistence.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import json
|
|
||||||
import sqlite3
|
|
||||||
from datetime import datetime, timezone
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import Any, Dict, Optional, Union
|
|
||||||
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
from crewai.flow.persistence.base import FlowPersistence
|
|
||||||
|
|
||||||
|
|
||||||
class SQLiteFlowPersistence(FlowPersistence):
|
|
||||||
"""SQLite-based implementation of flow state persistence.
|
|
||||||
|
|
||||||
This class provides a simple, file-based persistence implementation using SQLite.
|
|
||||||
It's suitable for development and testing, or for production use cases with
|
|
||||||
moderate performance requirements.
|
|
||||||
"""
|
|
||||||
|
|
||||||
db_path: str # Type annotation for instance variable
|
|
||||||
|
|
||||||
def __init__(self, db_path: Optional[str] = None):
|
|
||||||
"""Initialize SQLite persistence.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
db_path: Path to the SQLite database file. If not provided, uses
|
|
||||||
db_storage_path() from utilities.paths.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ValueError: If db_path is invalid
|
|
||||||
"""
|
|
||||||
from crewai.utilities.paths import db_storage_path
|
|
||||||
|
|
||||||
# Get path from argument or default location
|
|
||||||
path = db_path or str(Path(db_storage_path()) / "flow_states.db")
|
|
||||||
|
|
||||||
if not path:
|
|
||||||
raise ValueError("Database path must be provided")
|
|
||||||
|
|
||||||
self.db_path = path # Now mypy knows this is str
|
|
||||||
self.init_db()
|
|
||||||
|
|
||||||
def init_db(self) -> None:
|
|
||||||
"""Create the necessary tables if they don't exist."""
|
|
||||||
with sqlite3.connect(self.db_path) as conn:
|
|
||||||
conn.execute(
|
|
||||||
"""
|
|
||||||
CREATE TABLE IF NOT EXISTS flow_states (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
flow_uuid TEXT NOT NULL,
|
|
||||||
method_name TEXT NOT NULL,
|
|
||||||
timestamp DATETIME NOT NULL,
|
|
||||||
state_json TEXT NOT NULL
|
|
||||||
)
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
# Add index for faster UUID lookups
|
|
||||||
conn.execute(
|
|
||||||
"""
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_flow_states_uuid
|
|
||||||
ON flow_states(flow_uuid)
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
|
|
||||||
def save_state(
|
|
||||||
self,
|
|
||||||
flow_uuid: str,
|
|
||||||
method_name: str,
|
|
||||||
state_data: Union[Dict[str, Any], BaseModel],
|
|
||||||
) -> None:
|
|
||||||
"""Save the current flow state to SQLite.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
flow_uuid: Unique identifier for the flow instance
|
|
||||||
method_name: Name of the method that just completed
|
|
||||||
state_data: Current state data (either dict or Pydantic model)
|
|
||||||
"""
|
|
||||||
# Convert state_data to dict, handling both Pydantic and dict cases
|
|
||||||
if isinstance(state_data, BaseModel):
|
|
||||||
state_dict = dict(state_data) # Use dict() for better type compatibility
|
|
||||||
elif isinstance(state_data, dict):
|
|
||||||
state_dict = state_data
|
|
||||||
else:
|
|
||||||
raise ValueError(
|
|
||||||
f"state_data must be either a Pydantic BaseModel or dict, got {type(state_data)}"
|
|
||||||
)
|
|
||||||
|
|
||||||
with sqlite3.connect(self.db_path) as conn:
|
|
||||||
conn.execute(
|
|
||||||
"""
|
|
||||||
INSERT INTO flow_states (
|
|
||||||
flow_uuid,
|
|
||||||
method_name,
|
|
||||||
timestamp,
|
|
||||||
state_json
|
|
||||||
) VALUES (?, ?, ?, ?)
|
|
||||||
""",
|
|
||||||
(
|
|
||||||
flow_uuid,
|
|
||||||
method_name,
|
|
||||||
datetime.now(timezone.utc).isoformat(),
|
|
||||||
json.dumps(state_dict),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
|
|
||||||
"""Load the most recent state for a given flow UUID.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
flow_uuid: Unique identifier for the flow instance
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The most recent state as a dictionary, or None if no state exists
|
|
||||||
"""
|
|
||||||
with sqlite3.connect(self.db_path) as conn:
|
|
||||||
cursor = conn.execute(
|
|
||||||
"""
|
|
||||||
SELECT state_json
|
|
||||||
FROM flow_states
|
|
||||||
WHERE flow_uuid = ?
|
|
||||||
ORDER BY id DESC
|
|
||||||
LIMIT 1
|
|
||||||
""",
|
|
||||||
(flow_uuid,),
|
|
||||||
)
|
|
||||||
row = cursor.fetchone()
|
|
||||||
|
|
||||||
if row:
|
|
||||||
return json.loads(row[0])
|
|
||||||
return None
|
|
||||||
@@ -1,91 +0,0 @@
|
|||||||
import json
|
|
||||||
from datetime import date, datetime
|
|
||||||
from typing import Any, Dict, List, Union
|
|
||||||
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
from crewai.flow import Flow
|
|
||||||
|
|
||||||
SerializablePrimitive = Union[str, int, float, bool, None]
|
|
||||||
Serializable = Union[
|
|
||||||
SerializablePrimitive, List["Serializable"], Dict[str, "Serializable"]
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
def export_state(flow: Flow) -> dict[str, Serializable]:
|
|
||||||
"""Exports the Flow's internal state as JSON-compatible data structures.
|
|
||||||
|
|
||||||
Performs a one-way transformation of a Flow's state into basic Python types
|
|
||||||
that can be safely serialized to JSON. To prevent infinite recursion with
|
|
||||||
circular references, the conversion is limited to a depth of 5 levels.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
flow: The Flow object whose state needs to be exported
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
dict[str, Any]: The transformed state using JSON-compatible Python
|
|
||||||
types.
|
|
||||||
"""
|
|
||||||
result = to_serializable(flow._state)
|
|
||||||
assert isinstance(result, dict)
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def to_serializable(
|
|
||||||
obj: Any, max_depth: int = 5, _current_depth: int = 0
|
|
||||||
) -> Serializable:
|
|
||||||
"""Converts a Python object into a JSON-compatible representation.
|
|
||||||
|
|
||||||
Supports primitives, datetime objects, collections, dictionaries, and
|
|
||||||
Pydantic models. Recursion depth is limited to prevent infinite nesting.
|
|
||||||
Non-convertible objects default to their string representations.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
obj (Any): Object to transform.
|
|
||||||
max_depth (int, optional): Maximum recursion depth. Defaults to 5.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Serializable: A JSON-compatible structure.
|
|
||||||
"""
|
|
||||||
if _current_depth >= max_depth:
|
|
||||||
return repr(obj)
|
|
||||||
|
|
||||||
if isinstance(obj, (str, int, float, bool, type(None))):
|
|
||||||
return obj
|
|
||||||
elif isinstance(obj, (date, datetime)):
|
|
||||||
return obj.isoformat()
|
|
||||||
elif isinstance(obj, (list, tuple, set)):
|
|
||||||
return [to_serializable(item, max_depth, _current_depth + 1) for item in obj]
|
|
||||||
elif isinstance(obj, dict):
|
|
||||||
return {
|
|
||||||
_to_serializable_key(key): to_serializable(
|
|
||||||
value, max_depth, _current_depth + 1
|
|
||||||
)
|
|
||||||
for key, value in obj.items()
|
|
||||||
}
|
|
||||||
elif isinstance(obj, BaseModel):
|
|
||||||
return to_serializable(obj.model_dump(), max_depth, _current_depth + 1)
|
|
||||||
else:
|
|
||||||
return repr(obj)
|
|
||||||
|
|
||||||
|
|
||||||
def _to_serializable_key(key: Any) -> str:
|
|
||||||
if isinstance(key, (str, int)):
|
|
||||||
return str(key)
|
|
||||||
return f"key_{id(key)}_{repr(key)}"
|
|
||||||
|
|
||||||
|
|
||||||
def to_string(obj: Any) -> str | None:
|
|
||||||
"""Serializes an object into a JSON string.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
obj (Any): Object to serialize.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
str | None: A JSON-formatted string or `None` if empty.
|
|
||||||
"""
|
|
||||||
serializable = to_serializable(obj)
|
|
||||||
if serializable is None:
|
|
||||||
return None
|
|
||||||
else:
|
|
||||||
return json.dumps(serializable)
|
|
||||||
@@ -16,8 +16,7 @@ Example
|
|||||||
import ast
|
import ast
|
||||||
import inspect
|
import inspect
|
||||||
import textwrap
|
import textwrap
|
||||||
from collections import defaultdict, deque
|
from typing import Any, Dict, List, Optional, Set, Union
|
||||||
from typing import Any, Deque, Dict, List, Optional, Set, Union
|
|
||||||
|
|
||||||
|
|
||||||
def get_possible_return_constants(function: Any) -> Optional[List[str]]:
|
def get_possible_return_constants(function: Any) -> Optional[List[str]]:
|
||||||
@@ -119,7 +118,7 @@ def calculate_node_levels(flow: Any) -> Dict[str, int]:
|
|||||||
- Processes router paths separately
|
- Processes router paths separately
|
||||||
"""
|
"""
|
||||||
levels: Dict[str, int] = {}
|
levels: Dict[str, int] = {}
|
||||||
queue: Deque[str] = deque()
|
queue: List[str] = []
|
||||||
visited: Set[str] = set()
|
visited: Set[str] = set()
|
||||||
pending_and_listeners: Dict[str, Set[str]] = {}
|
pending_and_listeners: Dict[str, Set[str]] = {}
|
||||||
|
|
||||||
@@ -129,35 +128,28 @@ def calculate_node_levels(flow: Any) -> Dict[str, int]:
|
|||||||
levels[method_name] = 0
|
levels[method_name] = 0
|
||||||
queue.append(method_name)
|
queue.append(method_name)
|
||||||
|
|
||||||
# Precompute listener dependencies
|
|
||||||
or_listeners = defaultdict(list)
|
|
||||||
and_listeners = defaultdict(set)
|
|
||||||
for listener_name, (condition_type, trigger_methods) in flow._listeners.items():
|
|
||||||
if condition_type == "OR":
|
|
||||||
for method in trigger_methods:
|
|
||||||
or_listeners[method].append(listener_name)
|
|
||||||
elif condition_type == "AND":
|
|
||||||
and_listeners[listener_name] = set(trigger_methods)
|
|
||||||
|
|
||||||
# Breadth-first traversal to assign levels
|
# Breadth-first traversal to assign levels
|
||||||
while queue:
|
while queue:
|
||||||
current = queue.popleft()
|
current = queue.pop(0)
|
||||||
current_level = levels[current]
|
current_level = levels[current]
|
||||||
visited.add(current)
|
visited.add(current)
|
||||||
|
|
||||||
for listener_name in or_listeners[current]:
|
for listener_name, (condition_type, trigger_methods) in flow._listeners.items():
|
||||||
if listener_name not in levels or levels[listener_name] > current_level + 1:
|
if condition_type == "OR":
|
||||||
levels[listener_name] = current_level + 1
|
if current in trigger_methods:
|
||||||
if listener_name not in visited:
|
if (
|
||||||
queue.append(listener_name)
|
listener_name not in levels
|
||||||
|
or levels[listener_name] > current_level + 1
|
||||||
for listener_name, required_methods in and_listeners.items():
|
):
|
||||||
if current in required_methods:
|
levels[listener_name] = current_level + 1
|
||||||
|
if listener_name not in visited:
|
||||||
|
queue.append(listener_name)
|
||||||
|
elif condition_type == "AND":
|
||||||
if listener_name not in pending_and_listeners:
|
if listener_name not in pending_and_listeners:
|
||||||
pending_and_listeners[listener_name] = set()
|
pending_and_listeners[listener_name] = set()
|
||||||
pending_and_listeners[listener_name].add(current)
|
if current in trigger_methods:
|
||||||
|
pending_and_listeners[listener_name].add(current)
|
||||||
if required_methods == pending_and_listeners[listener_name]:
|
if set(trigger_methods) == pending_and_listeners[listener_name]:
|
||||||
if (
|
if (
|
||||||
listener_name not in levels
|
listener_name not in levels
|
||||||
or levels[listener_name] > current_level + 1
|
or levels[listener_name] > current_level + 1
|
||||||
@@ -167,7 +159,22 @@ def calculate_node_levels(flow: Any) -> Dict[str, int]:
|
|||||||
queue.append(listener_name)
|
queue.append(listener_name)
|
||||||
|
|
||||||
# Handle router connections
|
# Handle router connections
|
||||||
process_router_paths(flow, current, current_level, levels, queue)
|
if current in flow._routers:
|
||||||
|
router_method_name = current
|
||||||
|
paths = flow._router_paths.get(router_method_name, [])
|
||||||
|
for path in paths:
|
||||||
|
for listener_name, (
|
||||||
|
condition_type,
|
||||||
|
trigger_methods,
|
||||||
|
) in flow._listeners.items():
|
||||||
|
if path in trigger_methods:
|
||||||
|
if (
|
||||||
|
listener_name not in levels
|
||||||
|
or levels[listener_name] > current_level + 1
|
||||||
|
):
|
||||||
|
levels[listener_name] = current_level + 1
|
||||||
|
if listener_name not in visited:
|
||||||
|
queue.append(listener_name)
|
||||||
|
|
||||||
return levels
|
return levels
|
||||||
|
|
||||||
@@ -220,7 +227,10 @@ def build_ancestor_dict(flow: Any) -> Dict[str, Set[str]]:
|
|||||||
|
|
||||||
|
|
||||||
def dfs_ancestors(
|
def dfs_ancestors(
|
||||||
node: str, ancestors: Dict[str, Set[str]], visited: Set[str], flow: Any
|
node: str,
|
||||||
|
ancestors: Dict[str, Set[str]],
|
||||||
|
visited: Set[str],
|
||||||
|
flow: Any
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Perform depth-first search to build ancestor relationships.
|
Perform depth-first search to build ancestor relationships.
|
||||||
@@ -264,9 +274,7 @@ def dfs_ancestors(
|
|||||||
dfs_ancestors(listener_name, ancestors, visited, flow)
|
dfs_ancestors(listener_name, ancestors, visited, flow)
|
||||||
|
|
||||||
|
|
||||||
def is_ancestor(
|
def is_ancestor(node: str, ancestor_candidate: str, ancestors: Dict[str, Set[str]]) -> bool:
|
||||||
node: str, ancestor_candidate: str, ancestors: Dict[str, Set[str]]
|
|
||||||
) -> bool:
|
|
||||||
"""
|
"""
|
||||||
Check if one node is an ancestor of another.
|
Check if one node is an ancestor of another.
|
||||||
|
|
||||||
@@ -331,9 +339,7 @@ def build_parent_children_dict(flow: Any) -> Dict[str, List[str]]:
|
|||||||
return parent_children
|
return parent_children
|
||||||
|
|
||||||
|
|
||||||
def get_child_index(
|
def get_child_index(parent: str, child: str, parent_children: Dict[str, List[str]]) -> int:
|
||||||
parent: str, child: str, parent_children: Dict[str, List[str]]
|
|
||||||
) -> int:
|
|
||||||
"""
|
"""
|
||||||
Get the index of a child node in its parent's sorted children list.
|
Get the index of a child node in its parent's sorted children list.
|
||||||
|
|
||||||
@@ -354,23 +360,3 @@ def get_child_index(
|
|||||||
children = parent_children.get(parent, [])
|
children = parent_children.get(parent, [])
|
||||||
children.sort()
|
children.sort()
|
||||||
return children.index(child)
|
return children.index(child)
|
||||||
|
|
||||||
|
|
||||||
def process_router_paths(flow, current, current_level, levels, queue):
|
|
||||||
"""
|
|
||||||
Handle the router connections for the current node.
|
|
||||||
"""
|
|
||||||
if current in flow._routers:
|
|
||||||
paths = flow._router_paths.get(current, [])
|
|
||||||
for path in paths:
|
|
||||||
for listener_name, (
|
|
||||||
condition_type,
|
|
||||||
trigger_methods,
|
|
||||||
) in flow._listeners.items():
|
|
||||||
if path in trigger_methods:
|
|
||||||
if (
|
|
||||||
listener_name not in levels
|
|
||||||
or levels[listener_name] > current_level + 1
|
|
||||||
):
|
|
||||||
levels[listener_name] = current_level + 1
|
|
||||||
queue.append(listener_name)
|
|
||||||
|
|||||||
@@ -15,20 +15,20 @@ class Knowledge(BaseModel):
|
|||||||
Args:
|
Args:
|
||||||
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
|
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
|
||||||
storage: Optional[KnowledgeStorage] = Field(default=None)
|
storage: Optional[KnowledgeStorage] = Field(default=None)
|
||||||
embedder: Optional[Dict[str, Any]] = None
|
embedder_config: Optional[Dict[str, Any]] = None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
|
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
|
||||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||||
storage: Optional[KnowledgeStorage] = Field(default=None)
|
storage: Optional[KnowledgeStorage] = Field(default=None)
|
||||||
embedder: Optional[Dict[str, Any]] = None
|
embedder_config: Optional[Dict[str, Any]] = None
|
||||||
collection_name: Optional[str] = None
|
collection_name: Optional[str] = None
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
collection_name: str,
|
collection_name: str,
|
||||||
sources: List[BaseKnowledgeSource],
|
sources: List[BaseKnowledgeSource],
|
||||||
embedder: Optional[Dict[str, Any]] = None,
|
embedder_config: Optional[Dict[str, Any]] = None,
|
||||||
storage: Optional[KnowledgeStorage] = None,
|
storage: Optional[KnowledgeStorage] = None,
|
||||||
**data,
|
**data,
|
||||||
):
|
):
|
||||||
@@ -37,23 +37,25 @@ class Knowledge(BaseModel):
|
|||||||
self.storage = storage
|
self.storage = storage
|
||||||
else:
|
else:
|
||||||
self.storage = KnowledgeStorage(
|
self.storage = KnowledgeStorage(
|
||||||
embedder=embedder, collection_name=collection_name
|
embedder_config=embedder_config, collection_name=collection_name
|
||||||
)
|
)
|
||||||
self.sources = sources
|
self.sources = sources
|
||||||
self.storage.initialize_knowledge_storage()
|
self.storage.initialize_knowledge_storage()
|
||||||
self._add_sources()
|
for source in sources:
|
||||||
|
source.storage = self.storage
|
||||||
|
source.add()
|
||||||
|
|
||||||
def query(self, query: List[str], limit: int = 3) -> List[Dict[str, Any]]:
|
def query(self, query: List[str], limit: int = 3) -> List[Dict[str, Any]]:
|
||||||
"""
|
"""
|
||||||
Query across all knowledge sources to find the most relevant information.
|
Query across all knowledge sources to find the most relevant information.
|
||||||
Returns the top_k most relevant chunks.
|
Returns the top_k most relevant chunks.
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
ValueError: If storage is not initialized.
|
ValueError: If storage is not initialized.
|
||||||
"""
|
"""
|
||||||
if self.storage is None:
|
if self.storage is None:
|
||||||
raise ValueError("Storage is not initialized.")
|
raise ValueError("Storage is not initialized.")
|
||||||
|
|
||||||
results = self.storage.search(
|
results = self.storage.search(
|
||||||
query,
|
query,
|
||||||
limit,
|
limit,
|
||||||
@@ -61,15 +63,6 @@ class Knowledge(BaseModel):
|
|||||||
return results
|
return results
|
||||||
|
|
||||||
def _add_sources(self):
|
def _add_sources(self):
|
||||||
try:
|
for source in self.sources:
|
||||||
for source in self.sources:
|
source.storage = self.storage
|
||||||
source.storage = self.storage
|
source.add()
|
||||||
source.add()
|
|
||||||
except Exception as e:
|
|
||||||
raise e
|
|
||||||
|
|
||||||
def reset(self) -> None:
|
|
||||||
if self.storage:
|
|
||||||
self.storage.reset()
|
|
||||||
else:
|
|
||||||
raise ValueError("Storage is not initialized.")
|
|
||||||
|
|||||||
@@ -29,13 +29,7 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
|
|||||||
def validate_file_path(cls, v, info):
|
def validate_file_path(cls, v, info):
|
||||||
"""Validate that at least one of file_path or file_paths is provided."""
|
"""Validate that at least one of file_path or file_paths is provided."""
|
||||||
# Single check if both are None, O(1) instead of nested conditions
|
# Single check if both are None, O(1) instead of nested conditions
|
||||||
if (
|
if v is None and info.data.get("file_path" if info.field_name == "file_paths" else "file_paths") is None:
|
||||||
v is None
|
|
||||||
and info.data.get(
|
|
||||||
"file_path" if info.field_name == "file_paths" else "file_paths"
|
|
||||||
)
|
|
||||||
is None
|
|
||||||
):
|
|
||||||
raise ValueError("Either file_path or file_paths must be provided")
|
raise ValueError("Either file_path or file_paths must be provided")
|
||||||
return v
|
return v
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,6 @@ try:
|
|||||||
from docling.exceptions import ConversionError
|
from docling.exceptions import ConversionError
|
||||||
from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
|
from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
|
||||||
from docling_core.types.doc.document import DoclingDocument
|
from docling_core.types.doc.document import DoclingDocument
|
||||||
|
|
||||||
DOCLING_AVAILABLE = True
|
DOCLING_AVAILABLE = True
|
||||||
except ImportError:
|
except ImportError:
|
||||||
DOCLING_AVAILABLE = False
|
DOCLING_AVAILABLE = False
|
||||||
@@ -39,8 +38,8 @@ class CrewDoclingSource(BaseKnowledgeSource):
|
|||||||
file_paths: List[Union[Path, str]] = Field(default_factory=list)
|
file_paths: List[Union[Path, str]] = Field(default_factory=list)
|
||||||
chunks: List[str] = Field(default_factory=list)
|
chunks: List[str] = Field(default_factory=list)
|
||||||
safe_file_paths: List[Union[Path, str]] = Field(default_factory=list)
|
safe_file_paths: List[Union[Path, str]] = Field(default_factory=list)
|
||||||
content: List["DoclingDocument"] = Field(default_factory=list)
|
content: List[DoclingDocument] = Field(default_factory=list)
|
||||||
document_converter: "DocumentConverter" = Field(
|
document_converter: DocumentConverter = Field(
|
||||||
default_factory=lambda: DocumentConverter(
|
default_factory=lambda: DocumentConverter(
|
||||||
allowed_formats=[
|
allowed_formats=[
|
||||||
InputFormat.MD,
|
InputFormat.MD,
|
||||||
@@ -66,7 +65,7 @@ class CrewDoclingSource(BaseKnowledgeSource):
|
|||||||
self.safe_file_paths = self.validate_content()
|
self.safe_file_paths = self.validate_content()
|
||||||
self.content = self._load_content()
|
self.content = self._load_content()
|
||||||
|
|
||||||
def _load_content(self) -> List["DoclingDocument"]:
|
def _load_content(self) -> List[DoclingDocument]:
|
||||||
try:
|
try:
|
||||||
return self._convert_source_to_docling_documents()
|
return self._convert_source_to_docling_documents()
|
||||||
except ConversionError as e:
|
except ConversionError as e:
|
||||||
@@ -88,11 +87,11 @@ class CrewDoclingSource(BaseKnowledgeSource):
|
|||||||
self.chunks.extend(list(new_chunks_iterable))
|
self.chunks.extend(list(new_chunks_iterable))
|
||||||
self._save_documents()
|
self._save_documents()
|
||||||
|
|
||||||
def _convert_source_to_docling_documents(self) -> List["DoclingDocument"]:
|
def _convert_source_to_docling_documents(self) -> List[DoclingDocument]:
|
||||||
conv_results_iter = self.document_converter.convert_all(self.safe_file_paths)
|
conv_results_iter = self.document_converter.convert_all(self.safe_file_paths)
|
||||||
return [result.document for result in conv_results_iter]
|
return [result.document for result in conv_results_iter]
|
||||||
|
|
||||||
def _chunk_doc(self, doc: "DoclingDocument") -> Iterator[str]:
|
def _chunk_doc(self, doc: DoclingDocument) -> Iterator[str]:
|
||||||
chunker = HierarchicalChunker()
|
chunker = HierarchicalChunker()
|
||||||
for chunk in chunker.chunk(doc):
|
for chunk in chunker.chunk(doc):
|
||||||
yield chunk.text
|
yield chunk.text
|
||||||
|
|||||||
@@ -1,138 +1,28 @@
|
|||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Dict, Iterator, List, Optional, Union
|
from typing import Dict, List
|
||||||
from urllib.parse import urlparse
|
|
||||||
|
|
||||||
from pydantic import Field, field_validator
|
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
|
||||||
|
|
||||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
|
||||||
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
|
|
||||||
from crewai.utilities.logger import Logger
|
|
||||||
|
|
||||||
|
|
||||||
class ExcelKnowledgeSource(BaseKnowledgeSource):
|
class ExcelKnowledgeSource(BaseFileKnowledgeSource):
|
||||||
"""A knowledge source that stores and queries Excel file content using embeddings."""
|
"""A knowledge source that stores and queries Excel file content using embeddings."""
|
||||||
|
|
||||||
# override content to be a dict of file paths to sheet names to csv content
|
def load_content(self) -> Dict[Path, str]:
|
||||||
|
"""Load and preprocess Excel file content."""
|
||||||
_logger: Logger = Logger(verbose=True)
|
|
||||||
|
|
||||||
file_path: Optional[Union[Path, List[Path], str, List[str]]] = Field(
|
|
||||||
default=None,
|
|
||||||
description="[Deprecated] The path to the file. Use file_paths instead.",
|
|
||||||
)
|
|
||||||
file_paths: Optional[Union[Path, List[Path], str, List[str]]] = Field(
|
|
||||||
default_factory=list, description="The path to the file"
|
|
||||||
)
|
|
||||||
chunks: List[str] = Field(default_factory=list)
|
|
||||||
content: Dict[Path, Dict[str, str]] = Field(default_factory=dict)
|
|
||||||
safe_file_paths: List[Path] = Field(default_factory=list)
|
|
||||||
|
|
||||||
@field_validator("file_path", "file_paths", mode="before")
|
|
||||||
def validate_file_path(cls, v, info):
|
|
||||||
"""Validate that at least one of file_path or file_paths is provided."""
|
|
||||||
# Single check if both are None, O(1) instead of nested conditions
|
|
||||||
if (
|
|
||||||
v is None
|
|
||||||
and info.data.get(
|
|
||||||
"file_path" if info.field_name == "file_paths" else "file_paths"
|
|
||||||
)
|
|
||||||
is None
|
|
||||||
):
|
|
||||||
raise ValueError("Either file_path or file_paths must be provided")
|
|
||||||
return v
|
|
||||||
|
|
||||||
def _process_file_paths(self) -> List[Path]:
|
|
||||||
"""Convert file_path to a list of Path objects."""
|
|
||||||
|
|
||||||
if hasattr(self, "file_path") and self.file_path is not None:
|
|
||||||
self._logger.log(
|
|
||||||
"warning",
|
|
||||||
"The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
|
|
||||||
color="yellow",
|
|
||||||
)
|
|
||||||
self.file_paths = self.file_path
|
|
||||||
|
|
||||||
if self.file_paths is None:
|
|
||||||
raise ValueError("Your source must be provided with a file_paths: []")
|
|
||||||
|
|
||||||
# Convert single path to list
|
|
||||||
path_list: List[Union[Path, str]] = (
|
|
||||||
[self.file_paths]
|
|
||||||
if isinstance(self.file_paths, (str, Path))
|
|
||||||
else list(self.file_paths)
|
|
||||||
if isinstance(self.file_paths, list)
|
|
||||||
else []
|
|
||||||
)
|
|
||||||
|
|
||||||
if not path_list:
|
|
||||||
raise ValueError(
|
|
||||||
"file_path/file_paths must be a Path, str, or a list of these types"
|
|
||||||
)
|
|
||||||
|
|
||||||
return [self.convert_to_path(path) for path in path_list]
|
|
||||||
|
|
||||||
def validate_content(self):
|
|
||||||
"""Validate the paths."""
|
|
||||||
for path in self.safe_file_paths:
|
|
||||||
if not path.exists():
|
|
||||||
self._logger.log(
|
|
||||||
"error",
|
|
||||||
f"File not found: {path}. Try adding sources to the knowledge directory. If it's inside the knowledge directory, use the relative path.",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
raise FileNotFoundError(f"File not found: {path}")
|
|
||||||
if not path.is_file():
|
|
||||||
self._logger.log(
|
|
||||||
"error",
|
|
||||||
f"Path is not a file: {path}",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
|
|
||||||
def model_post_init(self, _) -> None:
|
|
||||||
if self.file_path:
|
|
||||||
self._logger.log(
|
|
||||||
"warning",
|
|
||||||
"The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
|
|
||||||
color="yellow",
|
|
||||||
)
|
|
||||||
self.file_paths = self.file_path
|
|
||||||
self.safe_file_paths = self._process_file_paths()
|
|
||||||
self.validate_content()
|
|
||||||
self.content = self._load_content()
|
|
||||||
|
|
||||||
def _load_content(self) -> Dict[Path, Dict[str, str]]:
|
|
||||||
"""Load and preprocess Excel file content from multiple sheets.
|
|
||||||
|
|
||||||
Each sheet's content is converted to CSV format and stored.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dict[Path, Dict[str, str]]: A mapping of file paths to their respective sheet contents.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ImportError: If required dependencies are missing.
|
|
||||||
FileNotFoundError: If the specified Excel file cannot be opened.
|
|
||||||
"""
|
|
||||||
pd = self._import_dependencies()
|
pd = self._import_dependencies()
|
||||||
|
|
||||||
content_dict = {}
|
content_dict = {}
|
||||||
for file_path in self.safe_file_paths:
|
for file_path in self.safe_file_paths:
|
||||||
file_path = self.convert_to_path(file_path)
|
file_path = self.convert_to_path(file_path)
|
||||||
with pd.ExcelFile(file_path) as xl:
|
df = pd.read_excel(file_path)
|
||||||
sheet_dict = {
|
content = df.to_csv(index=False)
|
||||||
str(sheet_name): str(
|
content_dict[file_path] = content
|
||||||
pd.read_excel(xl, sheet_name).to_csv(index=False)
|
|
||||||
)
|
|
||||||
for sheet_name in xl.sheet_names
|
|
||||||
}
|
|
||||||
content_dict[file_path] = sheet_dict
|
|
||||||
return content_dict
|
return content_dict
|
||||||
|
|
||||||
def convert_to_path(self, path: Union[Path, str]) -> Path:
|
|
||||||
"""Convert a path to a Path object."""
|
|
||||||
return Path(KNOWLEDGE_DIRECTORY + "/" + path) if isinstance(path, str) else path
|
|
||||||
|
|
||||||
def _import_dependencies(self):
|
def _import_dependencies(self):
|
||||||
"""Dynamically import dependencies."""
|
"""Dynamically import dependencies."""
|
||||||
try:
|
try:
|
||||||
|
import openpyxl # noqa
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
|
|
||||||
return pd
|
return pd
|
||||||
@@ -148,14 +38,10 @@ class ExcelKnowledgeSource(BaseKnowledgeSource):
|
|||||||
and save the embeddings.
|
and save the embeddings.
|
||||||
"""
|
"""
|
||||||
# Convert dictionary values to a single string if content is a dictionary
|
# Convert dictionary values to a single string if content is a dictionary
|
||||||
# Updated to account for .xlsx workbooks with multiple tabs/sheets
|
if isinstance(self.content, dict):
|
||||||
content_str = ""
|
content_str = "\n".join(str(value) for value in self.content.values())
|
||||||
for value in self.content.values():
|
else:
|
||||||
if isinstance(value, dict):
|
content_str = str(self.content)
|
||||||
for sheet_value in value.values():
|
|
||||||
content_str += str(sheet_value) + "\n"
|
|
||||||
else:
|
|
||||||
content_str += str(value) + "\n"
|
|
||||||
|
|
||||||
new_chunks = self._chunk_text(content_str)
|
new_chunks = self._chunk_text(content_str)
|
||||||
self.chunks.extend(new_chunks)
|
self.chunks.extend(new_chunks)
|
||||||
|
|||||||
@@ -48,11 +48,11 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
|||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
embedder: Optional[Dict[str, Any]] = None,
|
embedder_config: Optional[Dict[str, Any]] = None,
|
||||||
collection_name: Optional[str] = None,
|
collection_name: Optional[str] = None,
|
||||||
):
|
):
|
||||||
self.collection_name = collection_name
|
self.collection_name = collection_name
|
||||||
self._set_embedder_config(embedder)
|
self._set_embedder_config(embedder_config)
|
||||||
|
|
||||||
def search(
|
def search(
|
||||||
self,
|
self,
|
||||||
@@ -76,7 +76,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
|||||||
"context": fetched["documents"][0][i], # type: ignore
|
"context": fetched["documents"][0][i], # type: ignore
|
||||||
"score": fetched["distances"][0][i], # type: ignore
|
"score": fetched["distances"][0][i], # type: ignore
|
||||||
}
|
}
|
||||||
if result["score"] >= score_threshold:
|
if result["score"] >= score_threshold: # type: ignore
|
||||||
results.append(result)
|
results.append(result)
|
||||||
return results
|
return results
|
||||||
else:
|
else:
|
||||||
@@ -99,7 +99,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
|||||||
)
|
)
|
||||||
if self.app:
|
if self.app:
|
||||||
self.collection = self.app.get_or_create_collection(
|
self.collection = self.app.get_or_create_collection(
|
||||||
name=collection_name, embedding_function=self.embedder
|
name=collection_name, embedding_function=self.embedder_config
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
raise Exception("Vector Database Client not initialized")
|
raise Exception("Vector Database Client not initialized")
|
||||||
@@ -187,15 +187,17 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
|||||||
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
|
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
|
||||||
)
|
)
|
||||||
|
|
||||||
def _set_embedder_config(self, embedder: Optional[Dict[str, Any]] = None) -> None:
|
def _set_embedder_config(
|
||||||
|
self, embedder_config: Optional[Dict[str, Any]] = None
|
||||||
|
) -> None:
|
||||||
"""Set the embedding configuration for the knowledge storage.
|
"""Set the embedding configuration for the knowledge storage.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
embedder_config (Optional[Dict[str, Any]]): Configuration dictionary for the embedder.
|
embedder_config (Optional[Dict[str, Any]]): Configuration dictionary for the embedder.
|
||||||
If None or empty, defaults to the default embedding function.
|
If None or empty, defaults to the default embedding function.
|
||||||
"""
|
"""
|
||||||
self.embedder = (
|
self.embedder_config = (
|
||||||
EmbeddingConfigurator().configure_embedder(embedder)
|
EmbeddingConfigurator().configure_embedder(embedder_config)
|
||||||
if embedder
|
if embedder_config
|
||||||
else self._create_default_embedding_function()
|
else self._create_default_embedding_function()
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -5,28 +5,17 @@ import sys
|
|||||||
import threading
|
import threading
|
||||||
import warnings
|
import warnings
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
from typing import Any, Dict, List, Literal, Optional, Type, Union, cast
|
from typing import Any, Dict, List, Optional, Union, cast
|
||||||
|
|
||||||
from dotenv import load_dotenv
|
from dotenv import load_dotenv
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
from crewai.utilities.events.llm_events import (
|
|
||||||
LLMCallCompletedEvent,
|
|
||||||
LLMCallFailedEvent,
|
|
||||||
LLMCallStartedEvent,
|
|
||||||
LLMCallType,
|
|
||||||
)
|
|
||||||
from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent
|
|
||||||
|
|
||||||
with warnings.catch_warnings():
|
with warnings.catch_warnings():
|
||||||
warnings.simplefilter("ignore", UserWarning)
|
warnings.simplefilter("ignore", UserWarning)
|
||||||
import litellm
|
import litellm
|
||||||
from litellm import Choices
|
from litellm import Choices, get_supported_openai_params
|
||||||
from litellm.types.utils import ModelResponse
|
from litellm.types.utils import ModelResponse
|
||||||
from litellm.utils import get_supported_openai_params, supports_response_schema
|
|
||||||
|
|
||||||
|
|
||||||
from crewai.utilities.events import crewai_event_bus
|
|
||||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||||
LLMContextLengthExceededException,
|
LLMContextLengthExceededException,
|
||||||
)
|
)
|
||||||
@@ -64,7 +53,6 @@ LLM_CONTEXT_WINDOW_SIZES = {
|
|||||||
"gpt-4-turbo": 128000,
|
"gpt-4-turbo": 128000,
|
||||||
"o1-preview": 128000,
|
"o1-preview": 128000,
|
||||||
"o1-mini": 128000,
|
"o1-mini": 128000,
|
||||||
"o3-mini": 200000, # Based on official o3-mini specifications
|
|
||||||
# gemini
|
# gemini
|
||||||
"gemini-2.0-flash": 1048576,
|
"gemini-2.0-flash": 1048576,
|
||||||
"gemini-1.5-pro": 2097152,
|
"gemini-1.5-pro": 2097152,
|
||||||
@@ -140,23 +128,21 @@ class LLM:
|
|||||||
presence_penalty: Optional[float] = None,
|
presence_penalty: Optional[float] = None,
|
||||||
frequency_penalty: Optional[float] = None,
|
frequency_penalty: Optional[float] = None,
|
||||||
logit_bias: Optional[Dict[int, float]] = None,
|
logit_bias: Optional[Dict[int, float]] = None,
|
||||||
response_format: Optional[Type[BaseModel]] = None,
|
response_format: Optional[Dict[str, Any]] = None,
|
||||||
seed: Optional[int] = None,
|
seed: Optional[int] = None,
|
||||||
logprobs: Optional[int] = None,
|
logprobs: Optional[int] = None,
|
||||||
top_logprobs: Optional[int] = None,
|
top_logprobs: Optional[int] = None,
|
||||||
base_url: Optional[str] = None,
|
base_url: Optional[str] = None,
|
||||||
api_base: Optional[str] = None,
|
|
||||||
api_version: Optional[str] = None,
|
api_version: Optional[str] = None,
|
||||||
api_key: Optional[str] = None,
|
api_key: Optional[str] = None,
|
||||||
callbacks: List[Any] = [],
|
callbacks: List[Any] = [],
|
||||||
reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
|
|
||||||
**kwargs,
|
|
||||||
):
|
):
|
||||||
self.model = model
|
self.model = model
|
||||||
self.timeout = timeout
|
self.timeout = timeout
|
||||||
self.temperature = temperature
|
self.temperature = temperature
|
||||||
self.top_p = top_p
|
self.top_p = top_p
|
||||||
self.n = n
|
self.n = n
|
||||||
|
self.stop = stop
|
||||||
self.max_completion_tokens = max_completion_tokens
|
self.max_completion_tokens = max_completion_tokens
|
||||||
self.max_tokens = max_tokens
|
self.max_tokens = max_tokens
|
||||||
self.presence_penalty = presence_penalty
|
self.presence_penalty = presence_penalty
|
||||||
@@ -167,119 +153,47 @@ class LLM:
|
|||||||
self.logprobs = logprobs
|
self.logprobs = logprobs
|
||||||
self.top_logprobs = top_logprobs
|
self.top_logprobs = top_logprobs
|
||||||
self.base_url = base_url
|
self.base_url = base_url
|
||||||
self.api_base = api_base
|
|
||||||
self.api_version = api_version
|
self.api_version = api_version
|
||||||
self.api_key = api_key
|
self.api_key = api_key
|
||||||
self.callbacks = callbacks
|
self.callbacks = callbacks
|
||||||
self.context_window_size = 0
|
self.context_window_size = 0
|
||||||
self.reasoning_effort = reasoning_effort
|
|
||||||
self.additional_params = kwargs
|
|
||||||
self.is_anthropic = self._is_anthropic_model(model)
|
|
||||||
|
|
||||||
litellm.drop_params = True
|
litellm.drop_params = True
|
||||||
|
|
||||||
# Normalize self.stop to always be a List[str]
|
|
||||||
if stop is None:
|
|
||||||
self.stop: List[str] = []
|
|
||||||
elif isinstance(stop, str):
|
|
||||||
self.stop = [stop]
|
|
||||||
else:
|
|
||||||
self.stop = stop
|
|
||||||
|
|
||||||
self.set_callbacks(callbacks)
|
self.set_callbacks(callbacks)
|
||||||
self.set_env_callbacks()
|
self.set_env_callbacks()
|
||||||
|
|
||||||
def _is_anthropic_model(self, model: str) -> bool:
|
|
||||||
"""Determine if the model is from Anthropic provider.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
model: The model identifier string.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
bool: True if the model is from Anthropic, False otherwise.
|
|
||||||
"""
|
|
||||||
ANTHROPIC_PREFIXES = ("anthropic/", "claude-", "claude/")
|
|
||||||
return any(prefix in model.lower() for prefix in ANTHROPIC_PREFIXES)
|
|
||||||
|
|
||||||
def call(
|
def call(
|
||||||
self,
|
self,
|
||||||
messages: Union[str, List[Dict[str, str]]],
|
messages: List[Dict[str, str]],
|
||||||
tools: Optional[List[dict]] = None,
|
tools: Optional[List[dict]] = None,
|
||||||
callbacks: Optional[List[Any]] = None,
|
callbacks: Optional[List[Any]] = None,
|
||||||
available_functions: Optional[Dict[str, Any]] = None,
|
available_functions: Optional[Dict[str, Any]] = None,
|
||||||
) -> Union[str, Any]:
|
) -> str:
|
||||||
"""High-level LLM call method.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
messages: Input messages for the LLM.
|
|
||||||
Can be a string or list of message dictionaries.
|
|
||||||
If string, it will be converted to a single user message.
|
|
||||||
If list, each dict must have 'role' and 'content' keys.
|
|
||||||
tools: Optional list of tool schemas for function calling.
|
|
||||||
Each tool should define its name, description, and parameters.
|
|
||||||
callbacks: Optional list of callback functions to be executed
|
|
||||||
during and after the LLM call.
|
|
||||||
available_functions: Optional dict mapping function names to callables
|
|
||||||
that can be invoked by the LLM.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Union[str, Any]: Either a text response from the LLM (str) or
|
|
||||||
the result of a tool function call (Any).
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
TypeError: If messages format is invalid
|
|
||||||
ValueError: If response format is not supported
|
|
||||||
LLMContextLengthExceededException: If input exceeds model's context limit
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
# Example 1: Simple string input
|
|
||||||
>>> response = llm.call("Return the name of a random city.")
|
|
||||||
>>> print(response)
|
|
||||||
"Paris"
|
|
||||||
|
|
||||||
# Example 2: Message list with system and user messages
|
|
||||||
>>> messages = [
|
|
||||||
... {"role": "system", "content": "You are a geography expert"},
|
|
||||||
... {"role": "user", "content": "What is France's capital?"}
|
|
||||||
... ]
|
|
||||||
>>> response = llm.call(messages)
|
|
||||||
>>> print(response)
|
|
||||||
"The capital of France is Paris."
|
|
||||||
"""
|
"""
|
||||||
crewai_event_bus.emit(
|
High-level call method that:
|
||||||
self,
|
1) Calls litellm.completion
|
||||||
event=LLMCallStartedEvent(
|
2) Checks for function/tool calls
|
||||||
messages=messages,
|
3) If a tool call is found:
|
||||||
tools=tools,
|
a) executes the function
|
||||||
callbacks=callbacks,
|
b) returns the result
|
||||||
available_functions=available_functions,
|
4) If no tool call, returns the text response
|
||||||
),
|
|
||||||
)
|
|
||||||
# Validate parameters before proceeding with the call.
|
|
||||||
self._validate_call_params()
|
|
||||||
|
|
||||||
if isinstance(messages, str):
|
|
||||||
messages = [{"role": "user", "content": messages}]
|
|
||||||
|
|
||||||
# For O1 models, system messages are not supported.
|
|
||||||
# Convert any system messages into assistant messages.
|
|
||||||
if "o1" in self.model.lower():
|
|
||||||
for message in messages:
|
|
||||||
if message.get("role") == "system":
|
|
||||||
message["role"] = "assistant"
|
|
||||||
|
|
||||||
|
:param messages: The conversation messages
|
||||||
|
:param tools: Optional list of function schemas for function calling
|
||||||
|
:param callbacks: Optional list of callbacks
|
||||||
|
:param available_functions: A dictionary mapping function_name -> actual Python function
|
||||||
|
:return: Final text response from the LLM or the tool result
|
||||||
|
"""
|
||||||
with suppress_warnings():
|
with suppress_warnings():
|
||||||
if callbacks and len(callbacks) > 0:
|
if callbacks and len(callbacks) > 0:
|
||||||
self.set_callbacks(callbacks)
|
self.set_callbacks(callbacks)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# --- 1) Format messages according to provider requirements
|
# --- 1) Make the completion call
|
||||||
formatted_messages = self._format_messages_for_provider(messages)
|
|
||||||
|
|
||||||
# --- 2) Prepare the parameters for the completion call
|
|
||||||
params = {
|
params = {
|
||||||
"model": self.model,
|
"model": self.model,
|
||||||
"messages": formatted_messages,
|
"messages": messages,
|
||||||
"timeout": self.timeout,
|
"timeout": self.timeout,
|
||||||
"temperature": self.temperature,
|
"temperature": self.temperature,
|
||||||
"top_p": self.top_p,
|
"top_p": self.top_p,
|
||||||
@@ -293,20 +207,15 @@ class LLM:
|
|||||||
"seed": self.seed,
|
"seed": self.seed,
|
||||||
"logprobs": self.logprobs,
|
"logprobs": self.logprobs,
|
||||||
"top_logprobs": self.top_logprobs,
|
"top_logprobs": self.top_logprobs,
|
||||||
"api_base": self.api_base,
|
"api_base": self.base_url,
|
||||||
"base_url": self.base_url,
|
|
||||||
"api_version": self.api_version,
|
"api_version": self.api_version,
|
||||||
"api_key": self.api_key,
|
"api_key": self.api_key,
|
||||||
"stream": False,
|
"stream": False,
|
||||||
"tools": tools,
|
"tools": tools, # pass the tool schema
|
||||||
"reasoning_effort": self.reasoning_effort,
|
|
||||||
**self.additional_params,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Remove None values from params
|
|
||||||
params = {k: v for k, v in params.items() if v is not None}
|
params = {k: v for k, v in params.items() if v is not None}
|
||||||
|
|
||||||
# --- 2) Make the completion call
|
|
||||||
response = litellm.completion(**params)
|
response = litellm.completion(**params)
|
||||||
response_message = cast(Choices, cast(ModelResponse, response).choices)[
|
response_message = cast(Choices, cast(ModelResponse, response).choices)[
|
||||||
0
|
0
|
||||||
@@ -314,25 +223,11 @@ class LLM:
|
|||||||
text_response = response_message.content or ""
|
text_response = response_message.content or ""
|
||||||
tool_calls = getattr(response_message, "tool_calls", [])
|
tool_calls = getattr(response_message, "tool_calls", [])
|
||||||
|
|
||||||
# --- 3) Handle callbacks with usage info
|
# --- 2) If no tool calls, return the text response
|
||||||
if callbacks and len(callbacks) > 0:
|
|
||||||
for callback in callbacks:
|
|
||||||
if hasattr(callback, "log_success_event"):
|
|
||||||
usage_info = getattr(response, "usage", None)
|
|
||||||
if usage_info:
|
|
||||||
callback.log_success_event(
|
|
||||||
kwargs=params,
|
|
||||||
response_obj={"usage": usage_info},
|
|
||||||
start_time=0,
|
|
||||||
end_time=0,
|
|
||||||
)
|
|
||||||
|
|
||||||
# --- 4) If no tool calls, return the text response
|
|
||||||
if not tool_calls or not available_functions:
|
if not tool_calls or not available_functions:
|
||||||
self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL)
|
|
||||||
return text_response
|
return text_response
|
||||||
|
|
||||||
# --- 5) Handle the tool call
|
# --- 3) Handle the tool call
|
||||||
tool_call = tool_calls[0]
|
tool_call = tool_calls[0]
|
||||||
function_name = tool_call.function.name
|
function_name = tool_call.function.name
|
||||||
|
|
||||||
@@ -347,28 +242,13 @@ class LLM:
|
|||||||
try:
|
try:
|
||||||
# Call the actual tool function
|
# Call the actual tool function
|
||||||
result = fn(**function_args)
|
result = fn(**function_args)
|
||||||
self._handle_emit_call_events(result, LLMCallType.TOOL_CALL)
|
|
||||||
return result
|
return result
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.error(
|
logging.error(
|
||||||
f"Error executing function '{function_name}': {e}"
|
f"Error executing function '{function_name}': {e}"
|
||||||
)
|
)
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
event=ToolExecutionErrorEvent(
|
|
||||||
tool_name=function_name,
|
|
||||||
tool_args=function_args,
|
|
||||||
tool_class=fn,
|
|
||||||
error=str(e),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
event=LLMCallFailedEvent(
|
|
||||||
error=f"Tool execution error: {str(e)}"
|
|
||||||
),
|
|
||||||
)
|
|
||||||
return text_response
|
return text_response
|
||||||
|
|
||||||
else:
|
else:
|
||||||
@@ -378,98 +258,16 @@ class LLM:
|
|||||||
return text_response
|
return text_response
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
event=LLMCallFailedEvent(error=str(e)),
|
|
||||||
)
|
|
||||||
if not LLMContextLengthExceededException(
|
if not LLMContextLengthExceededException(
|
||||||
str(e)
|
str(e)
|
||||||
)._is_context_limit_error(str(e)):
|
)._is_context_limit_error(str(e)):
|
||||||
logging.error(f"LiteLLM call failed: {str(e)}")
|
logging.error(f"LiteLLM call failed: {str(e)}")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def _handle_emit_call_events(self, response: Any, call_type: LLMCallType):
|
|
||||||
"""Handle the events for the LLM call.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
response (str): The response from the LLM call.
|
|
||||||
call_type (str): The type of call, either "tool_call" or "llm_call".
|
|
||||||
"""
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
event=LLMCallCompletedEvent(response=response, call_type=call_type),
|
|
||||||
)
|
|
||||||
|
|
||||||
def _format_messages_for_provider(
|
|
||||||
self, messages: List[Dict[str, str]]
|
|
||||||
) -> List[Dict[str, str]]:
|
|
||||||
"""Format messages according to provider requirements.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
messages: List of message dictionaries with 'role' and 'content' keys.
|
|
||||||
Can be empty or None.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of formatted messages according to provider requirements.
|
|
||||||
For Anthropic models, ensures first message has 'user' role.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
TypeError: If messages is None or contains invalid message format.
|
|
||||||
"""
|
|
||||||
if messages is None:
|
|
||||||
raise TypeError("Messages cannot be None")
|
|
||||||
|
|
||||||
# Validate message format first
|
|
||||||
for msg in messages:
|
|
||||||
if not isinstance(msg, dict) or "role" not in msg or "content" not in msg:
|
|
||||||
raise TypeError(
|
|
||||||
"Invalid message format. Each message must be a dict with 'role' and 'content' keys"
|
|
||||||
)
|
|
||||||
|
|
||||||
if not self.is_anthropic:
|
|
||||||
return messages
|
|
||||||
|
|
||||||
# Anthropic requires messages to start with 'user' role
|
|
||||||
if not messages or messages[0]["role"] == "system":
|
|
||||||
# If first message is system or empty, add a placeholder user message
|
|
||||||
return [{"role": "user", "content": "."}, *messages]
|
|
||||||
|
|
||||||
return messages
|
|
||||||
|
|
||||||
def _get_custom_llm_provider(self) -> str:
|
|
||||||
"""
|
|
||||||
Derives the custom_llm_provider from the model string.
|
|
||||||
- For example, if the model is "openrouter/deepseek/deepseek-chat", returns "openrouter".
|
|
||||||
- If the model is "gemini/gemini-1.5-pro", returns "gemini".
|
|
||||||
- If there is no '/', defaults to "openai".
|
|
||||||
"""
|
|
||||||
if "/" in self.model:
|
|
||||||
return self.model.split("/")[0]
|
|
||||||
return "openai"
|
|
||||||
|
|
||||||
def _validate_call_params(self) -> None:
|
|
||||||
"""
|
|
||||||
Validate parameters before making a call. Currently this only checks if
|
|
||||||
a response_format is provided and whether the model supports it.
|
|
||||||
The custom_llm_provider is dynamically determined from the model:
|
|
||||||
- E.g., "openrouter/deepseek/deepseek-chat" yields "openrouter"
|
|
||||||
- "gemini/gemini-1.5-pro" yields "gemini"
|
|
||||||
- If no slash is present, "openai" is assumed.
|
|
||||||
"""
|
|
||||||
provider = self._get_custom_llm_provider()
|
|
||||||
if self.response_format is not None and not supports_response_schema(
|
|
||||||
model=self.model,
|
|
||||||
custom_llm_provider=provider,
|
|
||||||
):
|
|
||||||
raise ValueError(
|
|
||||||
f"The model {self.model} does not support response_format for provider '{provider}'. "
|
|
||||||
"Please remove response_format or use a supported model."
|
|
||||||
)
|
|
||||||
|
|
||||||
def supports_function_calling(self) -> bool:
|
def supports_function_calling(self) -> bool:
|
||||||
try:
|
try:
|
||||||
params = get_supported_openai_params(model=self.model)
|
params = get_supported_openai_params(model=self.model)
|
||||||
return params is not None and "tools" in params
|
return "response_format" in params
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.error(f"Failed to get supported params: {str(e)}")
|
logging.error(f"Failed to get supported params: {str(e)}")
|
||||||
return False
|
return False
|
||||||
@@ -477,7 +275,7 @@ class LLM:
|
|||||||
def supports_stop_words(self) -> bool:
|
def supports_stop_words(self) -> bool:
|
||||||
try:
|
try:
|
||||||
params = get_supported_openai_params(model=self.model)
|
params = get_supported_openai_params(model=self.model)
|
||||||
return params is not None and "stop" in params
|
return "stop" in params
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.error(f"Failed to get supported params: {str(e)}")
|
logging.error(f"Failed to get supported params: {str(e)}")
|
||||||
return False
|
return False
|
||||||
@@ -486,23 +284,10 @@ class LLM:
|
|||||||
"""
|
"""
|
||||||
Returns the context window size, using 75% of the maximum to avoid
|
Returns the context window size, using 75% of the maximum to avoid
|
||||||
cutting off messages mid-thread.
|
cutting off messages mid-thread.
|
||||||
|
|
||||||
Raises:
|
|
||||||
ValueError: If a model's context window size is outside valid bounds (1024-2097152)
|
|
||||||
"""
|
"""
|
||||||
if self.context_window_size != 0:
|
if self.context_window_size != 0:
|
||||||
return self.context_window_size
|
return self.context_window_size
|
||||||
|
|
||||||
MIN_CONTEXT = 1024
|
|
||||||
MAX_CONTEXT = 2097152 # Current max from gemini-1.5-pro
|
|
||||||
|
|
||||||
# Validate all context window sizes
|
|
||||||
for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
|
|
||||||
if value < MIN_CONTEXT or value > MAX_CONTEXT:
|
|
||||||
raise ValueError(
|
|
||||||
f"Context window for {key} must be between {MIN_CONTEXT} and {MAX_CONTEXT}"
|
|
||||||
)
|
|
||||||
|
|
||||||
self.context_window_size = int(
|
self.context_window_size = int(
|
||||||
DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
|
DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,7 +1,3 @@
|
|||||||
from typing import Optional
|
|
||||||
|
|
||||||
from pydantic import PrivateAttr
|
|
||||||
|
|
||||||
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
|
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
|
||||||
from crewai.memory.memory import Memory
|
from crewai.memory.memory import Memory
|
||||||
from crewai.memory.storage.rag_storage import RAGStorage
|
from crewai.memory.storage.rag_storage import RAGStorage
|
||||||
@@ -14,15 +10,13 @@ class EntityMemory(Memory):
|
|||||||
Inherits from the Memory class.
|
Inherits from the Memory class.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
_memory_provider: Optional[str] = PrivateAttr()
|
|
||||||
|
|
||||||
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
|
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
|
||||||
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
|
if hasattr(crew, "memory_config") and crew.memory_config is not None:
|
||||||
memory_provider = crew.memory_config.get("provider")
|
self.memory_provider = crew.memory_config.get("provider")
|
||||||
else:
|
else:
|
||||||
memory_provider = None
|
self.memory_provider = None
|
||||||
|
|
||||||
if memory_provider == "mem0":
|
if self.memory_provider == "mem0":
|
||||||
try:
|
try:
|
||||||
from crewai.memory.storage.mem0_storage import Mem0Storage
|
from crewai.memory.storage.mem0_storage import Mem0Storage
|
||||||
except ImportError:
|
except ImportError:
|
||||||
@@ -42,13 +36,11 @@ class EntityMemory(Memory):
|
|||||||
path=path,
|
path=path,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
super().__init__(storage)
|
||||||
super().__init__(storage=storage)
|
|
||||||
self._memory_provider = memory_provider
|
|
||||||
|
|
||||||
def save(self, item: EntityMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
|
def save(self, item: EntityMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
|
||||||
"""Saves an entity item into the SQLite storage."""
|
"""Saves an entity item into the SQLite storage."""
|
||||||
if self._memory_provider == "mem0":
|
if self.memory_provider == "mem0":
|
||||||
data = f"""
|
data = f"""
|
||||||
Remember details about the following entity:
|
Remember details about the following entity:
|
||||||
Name: {item.name}
|
Name: {item.name}
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ class LongTermMemory(Memory):
|
|||||||
def __init__(self, storage=None, path=None):
|
def __init__(self, storage=None, path=None):
|
||||||
if not storage:
|
if not storage:
|
||||||
storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
|
storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
|
||||||
super().__init__(storage=storage)
|
super().__init__(storage)
|
||||||
|
|
||||||
def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
|
def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
|
||||||
metadata = item.metadata
|
metadata = item.metadata
|
||||||
|
|||||||
@@ -1,19 +1,15 @@
|
|||||||
from typing import Any, Dict, List, Optional
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
from pydantic import BaseModel
|
from crewai.memory.storage.rag_storage import RAGStorage
|
||||||
|
|
||||||
|
|
||||||
class Memory(BaseModel):
|
class Memory:
|
||||||
"""
|
"""
|
||||||
Base class for memory, now supporting agent tags and generic metadata.
|
Base class for memory, now supporting agent tags and generic metadata.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
embedder_config: Optional[Dict[str, Any]] = None
|
def __init__(self, storage: RAGStorage):
|
||||||
|
self.storage = storage
|
||||||
storage: Any
|
|
||||||
|
|
||||||
def __init__(self, storage: Any, **data: Any):
|
|
||||||
super().__init__(storage=storage, **data)
|
|
||||||
|
|
||||||
def save(
|
def save(
|
||||||
self,
|
self,
|
||||||
|
|||||||
@@ -1,7 +1,5 @@
|
|||||||
from typing import Any, Dict, Optional
|
from typing import Any, Dict, Optional
|
||||||
|
|
||||||
from pydantic import PrivateAttr
|
|
||||||
|
|
||||||
from crewai.memory.memory import Memory
|
from crewai.memory.memory import Memory
|
||||||
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
|
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
|
||||||
from crewai.memory.storage.rag_storage import RAGStorage
|
from crewai.memory.storage.rag_storage import RAGStorage
|
||||||
@@ -16,15 +14,13 @@ class ShortTermMemory(Memory):
|
|||||||
MemoryItem instances.
|
MemoryItem instances.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
_memory_provider: Optional[str] = PrivateAttr()
|
|
||||||
|
|
||||||
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
|
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
|
||||||
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
|
if hasattr(crew, "memory_config") and crew.memory_config is not None:
|
||||||
memory_provider = crew.memory_config.get("provider")
|
self.memory_provider = crew.memory_config.get("provider")
|
||||||
else:
|
else:
|
||||||
memory_provider = None
|
self.memory_provider = None
|
||||||
|
|
||||||
if memory_provider == "mem0":
|
if self.memory_provider == "mem0":
|
||||||
try:
|
try:
|
||||||
from crewai.memory.storage.mem0_storage import Mem0Storage
|
from crewai.memory.storage.mem0_storage import Mem0Storage
|
||||||
except ImportError:
|
except ImportError:
|
||||||
@@ -43,8 +39,7 @@ class ShortTermMemory(Memory):
|
|||||||
path=path,
|
path=path,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
super().__init__(storage=storage)
|
super().__init__(storage)
|
||||||
self._memory_provider = memory_provider
|
|
||||||
|
|
||||||
def save(
|
def save(
|
||||||
self,
|
self,
|
||||||
@@ -53,7 +48,7 @@ class ShortTermMemory(Memory):
|
|||||||
agent: Optional[str] = None,
|
agent: Optional[str] = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
|
item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
|
||||||
if self._memory_provider == "mem0":
|
if self.memory_provider == "mem0":
|
||||||
item.data = f"Remember the following insights from Agent run: {item.data}"
|
item.data = f"Remember the following insights from Agent run: {item.data}"
|
||||||
|
|
||||||
super().save(value=item.data, metadata=item.metadata, agent=item.agent)
|
super().save(value=item.data, metadata=item.metadata, agent=item.agent)
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ class BaseRAGStorage(ABC):
|
|||||||
self,
|
self,
|
||||||
type: str,
|
type: str,
|
||||||
allow_reset: bool = True,
|
allow_reset: bool = True,
|
||||||
embedder_config: Optional[Dict[str, Any]] = None,
|
embedder_config: Optional[Any] = None,
|
||||||
crew: Any = None,
|
crew: Any = None,
|
||||||
):
|
):
|
||||||
self.type = type
|
self.type = type
|
||||||
|
|||||||
@@ -1,17 +1,12 @@
|
|||||||
import json
|
import json
|
||||||
import logging
|
|
||||||
import sqlite3
|
import sqlite3
|
||||||
from pathlib import Path
|
|
||||||
from typing import Any, Dict, List, Optional
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
from crewai.task import Task
|
from crewai.task import Task
|
||||||
from crewai.utilities import Printer
|
from crewai.utilities import Printer
|
||||||
from crewai.utilities.crew_json_encoder import CrewJSONEncoder
|
from crewai.utilities.crew_json_encoder import CrewJSONEncoder
|
||||||
from crewai.utilities.errors import DatabaseError, DatabaseOperationError
|
|
||||||
from crewai.utilities.paths import db_storage_path
|
from crewai.utilities.paths import db_storage_path
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class KickoffTaskOutputsSQLiteStorage:
|
class KickoffTaskOutputsSQLiteStorage:
|
||||||
"""
|
"""
|
||||||
@@ -19,24 +14,15 @@ class KickoffTaskOutputsSQLiteStorage:
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self, db_path: Optional[str] = None
|
self, db_path: str = f"{db_storage_path()}/latest_kickoff_task_outputs.db"
|
||||||
) -> None:
|
) -> None:
|
||||||
if db_path is None:
|
|
||||||
# Get the parent directory of the default db path and create our db file there
|
|
||||||
db_path = str(Path(db_storage_path()) / "latest_kickoff_task_outputs.db")
|
|
||||||
self.db_path = db_path
|
self.db_path = db_path
|
||||||
self._printer: Printer = Printer()
|
self._printer: Printer = Printer()
|
||||||
self._initialize_db()
|
self._initialize_db()
|
||||||
|
|
||||||
def _initialize_db(self) -> None:
|
def _initialize_db(self):
|
||||||
"""Initialize the SQLite database and create the latest_kickoff_task_outputs table.
|
"""
|
||||||
|
Initializes the SQLite database and creates LTM table
|
||||||
This method sets up the database schema for storing task outputs. It creates
|
|
||||||
a table with columns for task_id, expected_output, output (as JSON),
|
|
||||||
task_index, inputs (as JSON), was_replayed flag, and timestamp.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
DatabaseOperationError: If database initialization fails due to SQLite errors.
|
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
with sqlite3.connect(self.db_path) as conn:
|
with sqlite3.connect(self.db_path) as conn:
|
||||||
@@ -57,9 +43,10 @@ class KickoffTaskOutputsSQLiteStorage:
|
|||||||
|
|
||||||
conn.commit()
|
conn.commit()
|
||||||
except sqlite3.Error as e:
|
except sqlite3.Error as e:
|
||||||
error_msg = DatabaseError.format_error(DatabaseError.INIT_ERROR, e)
|
self._printer.print(
|
||||||
logger.error(error_msg)
|
content=f"SAVING KICKOFF TASK OUTPUTS ERROR: An error occurred during database initialization: {e}",
|
||||||
raise DatabaseOperationError(error_msg, e)
|
color="red",
|
||||||
|
)
|
||||||
|
|
||||||
def add(
|
def add(
|
||||||
self,
|
self,
|
||||||
@@ -68,22 +55,9 @@ class KickoffTaskOutputsSQLiteStorage:
|
|||||||
task_index: int,
|
task_index: int,
|
||||||
was_replayed: bool = False,
|
was_replayed: bool = False,
|
||||||
inputs: Dict[str, Any] = {},
|
inputs: Dict[str, Any] = {},
|
||||||
) -> None:
|
):
|
||||||
"""Add a new task output record to the database.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
task: The Task object containing task details.
|
|
||||||
output: Dictionary containing the task's output data.
|
|
||||||
task_index: Integer index of the task in the sequence.
|
|
||||||
was_replayed: Boolean indicating if this was a replay execution.
|
|
||||||
inputs: Dictionary of input parameters used for the task.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
DatabaseOperationError: If saving the task output fails due to SQLite errors.
|
|
||||||
"""
|
|
||||||
try:
|
try:
|
||||||
with sqlite3.connect(self.db_path) as conn:
|
with sqlite3.connect(self.db_path) as conn:
|
||||||
conn.execute("BEGIN TRANSACTION")
|
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""
|
"""
|
||||||
@@ -102,31 +76,21 @@ class KickoffTaskOutputsSQLiteStorage:
|
|||||||
)
|
)
|
||||||
conn.commit()
|
conn.commit()
|
||||||
except sqlite3.Error as e:
|
except sqlite3.Error as e:
|
||||||
error_msg = DatabaseError.format_error(DatabaseError.SAVE_ERROR, e)
|
self._printer.print(
|
||||||
logger.error(error_msg)
|
content=f"SAVING KICKOFF TASK OUTPUTS ERROR: An error occurred during database initialization: {e}",
|
||||||
raise DatabaseOperationError(error_msg, e)
|
color="red",
|
||||||
|
)
|
||||||
|
|
||||||
def update(
|
def update(
|
||||||
self,
|
self,
|
||||||
task_index: int,
|
task_index: int,
|
||||||
**kwargs: Any,
|
**kwargs,
|
||||||
) -> None:
|
):
|
||||||
"""Update an existing task output record in the database.
|
"""
|
||||||
|
Updates an existing row in the latest_kickoff_task_outputs table based on task_index.
|
||||||
Updates fields of a task output record identified by task_index. The fields
|
|
||||||
to update are provided as keyword arguments.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
task_index: Integer index of the task to update.
|
|
||||||
**kwargs: Arbitrary keyword arguments representing fields to update.
|
|
||||||
Values that are dictionaries will be JSON encoded.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
DatabaseOperationError: If updating the task output fails due to SQLite errors.
|
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
with sqlite3.connect(self.db_path) as conn:
|
with sqlite3.connect(self.db_path) as conn:
|
||||||
conn.execute("BEGIN TRANSACTION")
|
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
|
|
||||||
fields = []
|
fields = []
|
||||||
@@ -146,23 +110,14 @@ class KickoffTaskOutputsSQLiteStorage:
|
|||||||
conn.commit()
|
conn.commit()
|
||||||
|
|
||||||
if cursor.rowcount == 0:
|
if cursor.rowcount == 0:
|
||||||
logger.warning(f"No row found with task_index {task_index}. No update performed.")
|
self._printer.print(
|
||||||
|
f"No row found with task_index {task_index}. No update performed.",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
except sqlite3.Error as e:
|
except sqlite3.Error as e:
|
||||||
error_msg = DatabaseError.format_error(DatabaseError.UPDATE_ERROR, e)
|
self._printer.print(f"UPDATE KICKOFF TASK OUTPUTS ERROR: {e}", color="red")
|
||||||
logger.error(error_msg)
|
|
||||||
raise DatabaseOperationError(error_msg, e)
|
|
||||||
|
|
||||||
def load(self) -> List[Dict[str, Any]]:
|
def load(self) -> Optional[List[Dict[str, Any]]]:
|
||||||
"""Load all task output records from the database.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of dictionaries containing task output records, ordered by task_index.
|
|
||||||
Each dictionary contains: task_id, expected_output, output, task_index,
|
|
||||||
inputs, was_replayed, and timestamp.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
DatabaseOperationError: If loading task outputs fails due to SQLite errors.
|
|
||||||
"""
|
|
||||||
try:
|
try:
|
||||||
with sqlite3.connect(self.db_path) as conn:
|
with sqlite3.connect(self.db_path) as conn:
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
@@ -189,26 +144,23 @@ class KickoffTaskOutputsSQLiteStorage:
|
|||||||
return results
|
return results
|
||||||
|
|
||||||
except sqlite3.Error as e:
|
except sqlite3.Error as e:
|
||||||
error_msg = DatabaseError.format_error(DatabaseError.LOAD_ERROR, e)
|
self._printer.print(
|
||||||
logger.error(error_msg)
|
content=f"LOADING KICKOFF TASK OUTPUTS ERROR: An error occurred while querying kickoff task outputs: {e}",
|
||||||
raise DatabaseOperationError(error_msg, e)
|
color="red",
|
||||||
|
)
|
||||||
|
return None
|
||||||
|
|
||||||
def delete_all(self) -> None:
|
def delete_all(self):
|
||||||
"""Delete all task output records from the database.
|
"""
|
||||||
|
Deletes all rows from the latest_kickoff_task_outputs table.
|
||||||
This method removes all records from the latest_kickoff_task_outputs table.
|
|
||||||
Use with caution as this operation cannot be undone.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
DatabaseOperationError: If deleting task outputs fails due to SQLite errors.
|
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
with sqlite3.connect(self.db_path) as conn:
|
with sqlite3.connect(self.db_path) as conn:
|
||||||
conn.execute("BEGIN TRANSACTION")
|
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute("DELETE FROM latest_kickoff_task_outputs")
|
cursor.execute("DELETE FROM latest_kickoff_task_outputs")
|
||||||
conn.commit()
|
conn.commit()
|
||||||
except sqlite3.Error as e:
|
except sqlite3.Error as e:
|
||||||
error_msg = DatabaseError.format_error(DatabaseError.DELETE_ERROR, e)
|
self._printer.print(
|
||||||
logger.error(error_msg)
|
content=f"ERROR: Failed to delete all kickoff task outputs: {e}",
|
||||||
raise DatabaseOperationError(error_msg, e)
|
color="red",
|
||||||
|
)
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
import json
|
import json
|
||||||
import sqlite3
|
import sqlite3
|
||||||
from pathlib import Path
|
|
||||||
from typing import Any, Dict, List, Optional, Union
|
from typing import Any, Dict, List, Optional, Union
|
||||||
|
|
||||||
from crewai.utilities import Printer
|
from crewai.utilities import Printer
|
||||||
@@ -13,15 +12,10 @@ class LTMSQLiteStorage:
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self, db_path: Optional[str] = None
|
self, db_path: str = f"{db_storage_path()}/long_term_memory_storage.db"
|
||||||
) -> None:
|
) -> None:
|
||||||
if db_path is None:
|
|
||||||
# Get the parent directory of the default db path and create our db file there
|
|
||||||
db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
|
|
||||||
self.db_path = db_path
|
self.db_path = db_path
|
||||||
self._printer: Printer = Printer()
|
self._printer: Printer = Printer()
|
||||||
# Ensure parent directory exists
|
|
||||||
Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
|
|
||||||
self._initialize_db()
|
self._initialize_db()
|
||||||
|
|
||||||
def _initialize_db(self):
|
def _initialize_db(self):
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ from typing import (
|
|||||||
Union,
|
Union,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
from opentelemetry.trace import Span
|
||||||
from pydantic import (
|
from pydantic import (
|
||||||
UUID4,
|
UUID4,
|
||||||
BaseModel,
|
BaseModel,
|
||||||
@@ -35,15 +36,10 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
|
|||||||
from crewai.tasks.guardrail_result import GuardrailResult
|
from crewai.tasks.guardrail_result import GuardrailResult
|
||||||
from crewai.tasks.output_format import OutputFormat
|
from crewai.tasks.output_format import OutputFormat
|
||||||
from crewai.tasks.task_output import TaskOutput
|
from crewai.tasks.task_output import TaskOutput
|
||||||
|
from crewai.telemetry.telemetry import Telemetry
|
||||||
from crewai.tools.base_tool import BaseTool
|
from crewai.tools.base_tool import BaseTool
|
||||||
from crewai.utilities.config import process_config
|
from crewai.utilities.config import process_config
|
||||||
from crewai.utilities.converter import Converter, convert_to_model
|
from crewai.utilities.converter import Converter, convert_to_model
|
||||||
from crewai.utilities.events import (
|
|
||||||
TaskCompletedEvent,
|
|
||||||
TaskFailedEvent,
|
|
||||||
TaskStartedEvent,
|
|
||||||
)
|
|
||||||
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
|
||||||
from crewai.utilities.i18n import I18N
|
from crewai.utilities.i18n import I18N
|
||||||
from crewai.utilities.printer import Printer
|
from crewai.utilities.printer import Printer
|
||||||
|
|
||||||
@@ -187,6 +183,8 @@ class Task(BaseModel):
|
|||||||
)
|
)
|
||||||
return v
|
return v
|
||||||
|
|
||||||
|
_telemetry: Telemetry = PrivateAttr(default_factory=Telemetry)
|
||||||
|
_execution_span: Optional[Span] = PrivateAttr(default=None)
|
||||||
_original_description: Optional[str] = PrivateAttr(default=None)
|
_original_description: Optional[str] = PrivateAttr(default=None)
|
||||||
_original_expected_output: Optional[str] = PrivateAttr(default=None)
|
_original_expected_output: Optional[str] = PrivateAttr(default=None)
|
||||||
_original_output_file: Optional[str] = PrivateAttr(default=None)
|
_original_output_file: Optional[str] = PrivateAttr(default=None)
|
||||||
@@ -350,102 +348,94 @@ class Task(BaseModel):
|
|||||||
tools: Optional[List[Any]],
|
tools: Optional[List[Any]],
|
||||||
) -> TaskOutput:
|
) -> TaskOutput:
|
||||||
"""Run the core execution logic of the task."""
|
"""Run the core execution logic of the task."""
|
||||||
try:
|
agent = agent or self.agent
|
||||||
agent = agent or self.agent
|
self.agent = agent
|
||||||
self.agent = agent
|
if not agent:
|
||||||
if not agent:
|
raise Exception(
|
||||||
raise Exception(
|
f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
|
||||||
f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
|
|
||||||
)
|
|
||||||
|
|
||||||
self.start_time = datetime.datetime.now()
|
|
||||||
|
|
||||||
self.prompt_context = context
|
|
||||||
tools = tools or self.tools or []
|
|
||||||
|
|
||||||
self.processed_by_agents.add(agent.role)
|
|
||||||
crewai_event_bus.emit(self, TaskStartedEvent(context=context))
|
|
||||||
result = agent.execute_task(
|
|
||||||
task=self,
|
|
||||||
context=context,
|
|
||||||
tools=tools,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
pydantic_output, json_output = self._export_output(result)
|
self.start_time = datetime.datetime.now()
|
||||||
task_output = TaskOutput(
|
self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self)
|
||||||
name=self.name,
|
|
||||||
description=self.description,
|
|
||||||
expected_output=self.expected_output,
|
|
||||||
raw=result,
|
|
||||||
pydantic=pydantic_output,
|
|
||||||
json_dict=json_output,
|
|
||||||
agent=agent.role,
|
|
||||||
output_format=self._get_output_format(),
|
|
||||||
)
|
|
||||||
|
|
||||||
if self.guardrail:
|
self.prompt_context = context
|
||||||
guardrail_result = GuardrailResult.from_tuple(
|
tools = tools or self.tools or []
|
||||||
self.guardrail(task_output)
|
|
||||||
)
|
|
||||||
if not guardrail_result.success:
|
|
||||||
if self.retry_count >= self.max_retries:
|
|
||||||
raise Exception(
|
|
||||||
f"Task failed guardrail validation after {self.max_retries} retries. "
|
|
||||||
f"Last error: {guardrail_result.error}"
|
|
||||||
)
|
|
||||||
|
|
||||||
self.retry_count += 1
|
self.processed_by_agents.add(agent.role)
|
||||||
context = self.i18n.errors("validation_error").format(
|
|
||||||
guardrail_result_error=guardrail_result.error,
|
|
||||||
task_output=task_output.raw,
|
|
||||||
)
|
|
||||||
printer = Printer()
|
|
||||||
printer.print(
|
|
||||||
content=f"Guardrail blocked, retrying, due to: {guardrail_result.error}\n",
|
|
||||||
color="yellow",
|
|
||||||
)
|
|
||||||
return self._execute_core(agent, context, tools)
|
|
||||||
|
|
||||||
if guardrail_result.result is None:
|
result = agent.execute_task(
|
||||||
|
task=self,
|
||||||
|
context=context,
|
||||||
|
tools=tools,
|
||||||
|
)
|
||||||
|
|
||||||
|
pydantic_output, json_output = self._export_output(result)
|
||||||
|
task_output = TaskOutput(
|
||||||
|
name=self.name,
|
||||||
|
description=self.description,
|
||||||
|
expected_output=self.expected_output,
|
||||||
|
raw=result,
|
||||||
|
pydantic=pydantic_output,
|
||||||
|
json_dict=json_output,
|
||||||
|
agent=agent.role,
|
||||||
|
output_format=self._get_output_format(),
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.guardrail:
|
||||||
|
guardrail_result = GuardrailResult.from_tuple(self.guardrail(task_output))
|
||||||
|
if not guardrail_result.success:
|
||||||
|
if self.retry_count >= self.max_retries:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
"Task guardrail returned None as result. This is not allowed."
|
f"Task failed guardrail validation after {self.max_retries} retries. "
|
||||||
|
f"Last error: {guardrail_result.error}"
|
||||||
)
|
)
|
||||||
|
|
||||||
if isinstance(guardrail_result.result, str):
|
self.retry_count += 1
|
||||||
task_output.raw = guardrail_result.result
|
context = self.i18n.errors("validation_error").format(
|
||||||
pydantic_output, json_output = self._export_output(
|
guardrail_result_error=guardrail_result.error,
|
||||||
guardrail_result.result
|
task_output=task_output.raw,
|
||||||
)
|
|
||||||
task_output.pydantic = pydantic_output
|
|
||||||
task_output.json_dict = json_output
|
|
||||||
elif isinstance(guardrail_result.result, TaskOutput):
|
|
||||||
task_output = guardrail_result.result
|
|
||||||
|
|
||||||
self.output = task_output
|
|
||||||
self.end_time = datetime.datetime.now()
|
|
||||||
|
|
||||||
if self.callback:
|
|
||||||
self.callback(self.output)
|
|
||||||
|
|
||||||
crew = self.agent.crew # type: ignore[union-attr]
|
|
||||||
if crew and crew.task_callback and crew.task_callback != self.callback:
|
|
||||||
crew.task_callback(self.output)
|
|
||||||
|
|
||||||
if self.output_file:
|
|
||||||
content = (
|
|
||||||
json_output
|
|
||||||
if json_output
|
|
||||||
else pydantic_output.model_dump_json()
|
|
||||||
if pydantic_output
|
|
||||||
else result
|
|
||||||
)
|
)
|
||||||
self._save_file(content)
|
printer = Printer()
|
||||||
crewai_event_bus.emit(self, TaskCompletedEvent(output=task_output))
|
printer.print(
|
||||||
return task_output
|
content=f"Guardrail blocked, retrying, due to: {guardrail_result.error}\n",
|
||||||
except Exception as e:
|
color="yellow",
|
||||||
self.end_time = datetime.datetime.now()
|
)
|
||||||
crewai_event_bus.emit(self, TaskFailedEvent(error=str(e)))
|
return self._execute_core(agent, context, tools)
|
||||||
raise e # Re-raise the exception after emitting the event
|
|
||||||
|
if guardrail_result.result is None:
|
||||||
|
raise Exception(
|
||||||
|
"Task guardrail returned None as result. This is not allowed."
|
||||||
|
)
|
||||||
|
|
||||||
|
if isinstance(guardrail_result.result, str):
|
||||||
|
task_output.raw = guardrail_result.result
|
||||||
|
pydantic_output, json_output = self._export_output(
|
||||||
|
guardrail_result.result
|
||||||
|
)
|
||||||
|
task_output.pydantic = pydantic_output
|
||||||
|
task_output.json_dict = json_output
|
||||||
|
elif isinstance(guardrail_result.result, TaskOutput):
|
||||||
|
task_output = guardrail_result.result
|
||||||
|
|
||||||
|
self.output = task_output
|
||||||
|
self.end_time = datetime.datetime.now()
|
||||||
|
|
||||||
|
if self.callback:
|
||||||
|
self.callback(self.output)
|
||||||
|
|
||||||
|
if self._execution_span:
|
||||||
|
self._telemetry.task_ended(self._execution_span, self, agent.crew)
|
||||||
|
self._execution_span = None
|
||||||
|
|
||||||
|
if self.output_file:
|
||||||
|
content = (
|
||||||
|
json_output
|
||||||
|
if json_output
|
||||||
|
else pydantic_output.model_dump_json() if pydantic_output else result
|
||||||
|
)
|
||||||
|
self._save_file(content)
|
||||||
|
|
||||||
|
return task_output
|
||||||
|
|
||||||
def prompt(self) -> str:
|
def prompt(self) -> str:
|
||||||
"""Prompt the task.
|
"""Prompt the task.
|
||||||
@@ -462,7 +452,7 @@ class Task(BaseModel):
|
|||||||
return "\n".join(tasks_slices)
|
return "\n".join(tasks_slices)
|
||||||
|
|
||||||
def interpolate_inputs_and_add_conversation_history(
|
def interpolate_inputs_and_add_conversation_history(
|
||||||
self, inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]]
|
self, inputs: Dict[str, Union[str, int, float]]
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Interpolate inputs into the task description, expected output, and output file path.
|
"""Interpolate inputs into the task description, expected output, and output file path.
|
||||||
Add conversation history if present.
|
Add conversation history if present.
|
||||||
@@ -534,9 +524,7 @@ class Task(BaseModel):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def interpolate_only(
|
def interpolate_only(
|
||||||
self,
|
self, input_string: Optional[str], inputs: Dict[str, Union[str, int, float]]
|
||||||
input_string: Optional[str],
|
|
||||||
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]],
|
|
||||||
) -> str:
|
) -> str:
|
||||||
"""Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched.
|
"""Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched.
|
||||||
|
|
||||||
@@ -544,39 +532,17 @@ class Task(BaseModel):
|
|||||||
input_string: The string containing template variables to interpolate.
|
input_string: The string containing template variables to interpolate.
|
||||||
Can be None or empty, in which case an empty string is returned.
|
Can be None or empty, in which case an empty string is returned.
|
||||||
inputs: Dictionary mapping template variables to their values.
|
inputs: Dictionary mapping template variables to their values.
|
||||||
Supported value types are strings, integers, floats, and dicts/lists
|
Supported value types are strings, integers, and floats.
|
||||||
containing only these types and other nested dicts/lists.
|
If input_string is empty or has no placeholders, inputs can be empty.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
The interpolated string with all template variables replaced with their values.
|
The interpolated string with all template variables replaced with their values.
|
||||||
Empty string if input_string is None or empty.
|
Empty string if input_string is None or empty.
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
ValueError: If a value contains unsupported types
|
ValueError: If a required template variable is missing from inputs.
|
||||||
|
KeyError: If a template variable is not found in the inputs dictionary.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# Validation function for recursive type checking
|
|
||||||
def validate_type(value: Any) -> None:
|
|
||||||
if value is None:
|
|
||||||
return
|
|
||||||
if isinstance(value, (str, int, float, bool)):
|
|
||||||
return
|
|
||||||
if isinstance(value, (dict, list)):
|
|
||||||
for item in value.values() if isinstance(value, dict) else value:
|
|
||||||
validate_type(item)
|
|
||||||
return
|
|
||||||
raise ValueError(
|
|
||||||
f"Unsupported type {type(value).__name__} in inputs. "
|
|
||||||
"Only str, int, float, bool, dict, and list are allowed."
|
|
||||||
)
|
|
||||||
|
|
||||||
# Validate all input values
|
|
||||||
for key, value in inputs.items():
|
|
||||||
try:
|
|
||||||
validate_type(value)
|
|
||||||
except ValueError as e:
|
|
||||||
raise ValueError(f"Invalid value for key '{key}': {str(e)}") from e
|
|
||||||
|
|
||||||
if input_string is None or not input_string:
|
if input_string is None or not input_string:
|
||||||
return ""
|
return ""
|
||||||
if "{" not in input_string and "}" not in input_string:
|
if "{" not in input_string and "}" not in input_string:
|
||||||
@@ -585,7 +551,15 @@ class Task(BaseModel):
|
|||||||
raise ValueError(
|
raise ValueError(
|
||||||
"Inputs dictionary cannot be empty when interpolating variables"
|
"Inputs dictionary cannot be empty when interpolating variables"
|
||||||
)
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
# Validate input types
|
||||||
|
for key, value in inputs.items():
|
||||||
|
if not isinstance(value, (str, int, float)):
|
||||||
|
raise ValueError(
|
||||||
|
f"Value for key '{key}' must be a string, integer, or float, got {type(value).__name__}"
|
||||||
|
)
|
||||||
|
|
||||||
escaped_string = input_string.replace("{", "{{").replace("}", "}}")
|
escaped_string = input_string.replace("{", "{{").replace("}", "}}")
|
||||||
|
|
||||||
for key in inputs.keys():
|
for key in inputs.keys():
|
||||||
@@ -678,32 +652,19 @@ class Task(BaseModel):
|
|||||||
return OutputFormat.PYDANTIC
|
return OutputFormat.PYDANTIC
|
||||||
return OutputFormat.RAW
|
return OutputFormat.RAW
|
||||||
|
|
||||||
def _save_file(self, result: Union[Dict, str, Any]) -> None:
|
def _save_file(self, result: Any) -> None:
|
||||||
"""Save task output to a file.
|
"""Save task output to a file.
|
||||||
|
|
||||||
Note:
|
|
||||||
For cross-platform file writing, especially on Windows, consider using FileWriterTool
|
|
||||||
from the crewai_tools package:
|
|
||||||
pip install 'crewai[tools]'
|
|
||||||
from crewai_tools import FileWriterTool
|
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
result: The result to save to the file. Can be a dict or any stringifiable object.
|
result: The result to save to the file. Can be a dict or any stringifiable object.
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
ValueError: If output_file is not set
|
ValueError: If output_file is not set
|
||||||
RuntimeError: If there is an error writing to the file. For cross-platform
|
RuntimeError: If there is an error writing to the file
|
||||||
compatibility, especially on Windows, use FileWriterTool from crewai_tools
|
|
||||||
package.
|
|
||||||
"""
|
"""
|
||||||
if self.output_file is None:
|
if self.output_file is None:
|
||||||
raise ValueError("output_file is not set.")
|
raise ValueError("output_file is not set.")
|
||||||
|
|
||||||
FILEWRITER_RECOMMENDATION = (
|
|
||||||
"For cross-platform file writing, especially on Windows, "
|
|
||||||
"use FileWriterTool from crewai_tools package."
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
resolved_path = Path(self.output_file).expanduser().resolve()
|
resolved_path = Path(self.output_file).expanduser().resolve()
|
||||||
directory = resolved_path.parent
|
directory = resolved_path.parent
|
||||||
@@ -719,11 +680,7 @@ class Task(BaseModel):
|
|||||||
else:
|
else:
|
||||||
file.write(str(result))
|
file.write(str(result))
|
||||||
except (OSError, IOError) as e:
|
except (OSError, IOError) as e:
|
||||||
raise RuntimeError(
|
raise RuntimeError(f"Failed to save output file: {e}")
|
||||||
"\n".join(
|
|
||||||
[f"Failed to save output file: {e}", FILEWRITER_RECOMMENDATION]
|
|
||||||
)
|
|
||||||
)
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
|
|||||||
@@ -7,11 +7,11 @@ from crewai.utilities import I18N
|
|||||||
|
|
||||||
i18n = I18N()
|
i18n = I18N()
|
||||||
|
|
||||||
|
|
||||||
class AddImageToolSchema(BaseModel):
|
class AddImageToolSchema(BaseModel):
|
||||||
image_url: str = Field(..., description="The URL or path of the image to add")
|
image_url: str = Field(..., description="The URL or path of the image to add")
|
||||||
action: Optional[str] = Field(
|
action: Optional[str] = Field(
|
||||||
default=None, description="Optional context or question about the image"
|
default=None,
|
||||||
|
description="Optional context or question about the image"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -36,7 +36,10 @@ class AddImageTool(BaseTool):
|
|||||||
"image_url": {
|
"image_url": {
|
||||||
"url": image_url,
|
"url": image_url,
|
||||||
},
|
},
|
||||||
},
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
return {"role": "user", "content": content}
|
return {
|
||||||
|
"role": "user",
|
||||||
|
"content": content
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,30 +1,28 @@
|
|||||||
import ast
|
import ast
|
||||||
import datetime
|
import datetime
|
||||||
import json
|
import json
|
||||||
|
import re
|
||||||
import time
|
import time
|
||||||
from difflib import SequenceMatcher
|
from difflib import SequenceMatcher
|
||||||
from json import JSONDecodeError
|
|
||||||
from textwrap import dedent
|
from textwrap import dedent
|
||||||
from typing import Any, Dict, List, Optional, Union
|
from typing import Any, Dict, List, Union
|
||||||
|
|
||||||
import json5
|
|
||||||
from json_repair import repair_json
|
from json_repair import repair_json
|
||||||
|
|
||||||
|
import crewai.utilities.events as events
|
||||||
from crewai.agents.tools_handler import ToolsHandler
|
from crewai.agents.tools_handler import ToolsHandler
|
||||||
from crewai.task import Task
|
from crewai.task import Task
|
||||||
from crewai.telemetry import Telemetry
|
from crewai.telemetry import Telemetry
|
||||||
from crewai.tools import BaseTool
|
from crewai.tools import BaseTool
|
||||||
from crewai.tools.structured_tool import CrewStructuredTool
|
from crewai.tools.structured_tool import CrewStructuredTool
|
||||||
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
|
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
|
||||||
|
from crewai.tools.tool_usage_events import ToolUsageError, ToolUsageFinished
|
||||||
from crewai.utilities import I18N, Converter, ConverterError, Printer
|
from crewai.utilities import I18N, Converter, ConverterError, Printer
|
||||||
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
|
||||||
from crewai.utilities.events.tool_usage_events import (
|
|
||||||
ToolSelectionErrorEvent,
|
|
||||||
ToolUsageErrorEvent,
|
|
||||||
ToolUsageFinishedEvent,
|
|
||||||
ToolValidateInputErrorEvent,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
import agentops # type: ignore
|
||||||
|
except ImportError:
|
||||||
|
agentops = None
|
||||||
OPENAI_BIGGER_MODELS = [
|
OPENAI_BIGGER_MODELS = [
|
||||||
"gpt-4",
|
"gpt-4",
|
||||||
"gpt-4o",
|
"gpt-4o",
|
||||||
@@ -137,6 +135,7 @@ class ToolUsage:
|
|||||||
tool: Any,
|
tool: Any,
|
||||||
calling: Union[ToolCalling, InstructorToolCalling],
|
calling: Union[ToolCalling, InstructorToolCalling],
|
||||||
) -> str: # TODO: Fix this return type
|
) -> str: # TODO: Fix this return type
|
||||||
|
tool_event = agentops.ToolEvent(name=calling.tool_name) if agentops else None # type: ignore
|
||||||
if self._check_tool_repeated_usage(calling=calling): # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
|
if self._check_tool_repeated_usage(calling=calling): # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
|
||||||
try:
|
try:
|
||||||
result = self._i18n.errors("task_repeated_usage").format(
|
result = self._i18n.errors("task_repeated_usage").format(
|
||||||
@@ -212,6 +211,10 @@ class ToolUsage:
|
|||||||
return error # type: ignore # No return value expected
|
return error # type: ignore # No return value expected
|
||||||
|
|
||||||
self.task.increment_tools_errors()
|
self.task.increment_tools_errors()
|
||||||
|
if agentops:
|
||||||
|
agentops.record(
|
||||||
|
agentops.ErrorEvent(exception=e, trigger_event=tool_event)
|
||||||
|
)
|
||||||
return self.use(calling=calling, tool_string=tool_string) # type: ignore # No return value expected
|
return self.use(calling=calling, tool_string=tool_string) # type: ignore # No return value expected
|
||||||
|
|
||||||
if self.tools_handler:
|
if self.tools_handler:
|
||||||
@@ -227,6 +230,9 @@ class ToolUsage:
|
|||||||
self.tools_handler.on_tool_use(
|
self.tools_handler.on_tool_use(
|
||||||
calling=calling, output=result, should_cache=should_cache
|
calling=calling, output=result, should_cache=should_cache
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if agentops:
|
||||||
|
agentops.record(tool_event)
|
||||||
self._telemetry.tool_usage(
|
self._telemetry.tool_usage(
|
||||||
llm=self.function_calling_llm,
|
llm=self.function_calling_llm,
|
||||||
tool_name=tool.name,
|
tool_name=tool.name,
|
||||||
@@ -301,33 +307,14 @@ class ToolUsage:
|
|||||||
):
|
):
|
||||||
return tool
|
return tool
|
||||||
self.task.increment_tools_errors()
|
self.task.increment_tools_errors()
|
||||||
tool_selection_data = {
|
|
||||||
"agent_key": self.agent.key,
|
|
||||||
"agent_role": self.agent.role,
|
|
||||||
"tool_name": tool_name,
|
|
||||||
"tool_args": {},
|
|
||||||
"tool_class": self.tools_description,
|
|
||||||
}
|
|
||||||
if tool_name and tool_name != "":
|
if tool_name and tool_name != "":
|
||||||
error = f"Action '{tool_name}' don't exist, these are the only available Actions:\n{self.tools_description}"
|
raise Exception(
|
||||||
crewai_event_bus.emit(
|
f"Action '{tool_name}' don't exist, these are the only available Actions:\n{self.tools_description}"
|
||||||
self,
|
|
||||||
ToolSelectionErrorEvent(
|
|
||||||
**tool_selection_data,
|
|
||||||
error=error,
|
|
||||||
),
|
|
||||||
)
|
)
|
||||||
raise Exception(error)
|
|
||||||
else:
|
else:
|
||||||
error = f"I forgot the Action name, these are the only available Actions: {self.tools_description}"
|
raise Exception(
|
||||||
crewai_event_bus.emit(
|
f"I forgot the Action name, these are the only available Actions: {self.tools_description}"
|
||||||
self,
|
|
||||||
ToolSelectionErrorEvent(
|
|
||||||
**tool_selection_data,
|
|
||||||
error=error,
|
|
||||||
),
|
|
||||||
)
|
)
|
||||||
raise Exception(error)
|
|
||||||
|
|
||||||
def _render(self) -> str:
|
def _render(self) -> str:
|
||||||
"""Render the tool name and description in plain text."""
|
"""Render the tool name and description in plain text."""
|
||||||
@@ -420,76 +407,34 @@ class ToolUsage:
|
|||||||
)
|
)
|
||||||
return self._tool_calling(tool_string)
|
return self._tool_calling(tool_string)
|
||||||
|
|
||||||
def _validate_tool_input(self, tool_input: Optional[str]) -> Dict[str, Any]:
|
def _validate_tool_input(self, tool_input: str) -> Dict[str, Any]:
|
||||||
if tool_input is None:
|
|
||||||
return {}
|
|
||||||
|
|
||||||
if not isinstance(tool_input, str) or not tool_input.strip():
|
|
||||||
raise Exception(
|
|
||||||
"Tool input must be a valid dictionary in JSON or Python literal format"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Attempt 1: Parse as JSON
|
|
||||||
try:
|
try:
|
||||||
|
# Replace Python literals with JSON equivalents
|
||||||
|
replacements = {
|
||||||
|
r"'": '"',
|
||||||
|
r"None": "null",
|
||||||
|
r"True": "true",
|
||||||
|
r"False": "false",
|
||||||
|
}
|
||||||
|
for pattern, replacement in replacements.items():
|
||||||
|
tool_input = re.sub(pattern, replacement, tool_input)
|
||||||
|
|
||||||
arguments = json.loads(tool_input)
|
arguments = json.loads(tool_input)
|
||||||
if isinstance(arguments, dict):
|
except json.JSONDecodeError:
|
||||||
return arguments
|
# Attempt to repair JSON string
|
||||||
except (JSONDecodeError, TypeError):
|
|
||||||
pass # Continue to the next parsing attempt
|
|
||||||
|
|
||||||
# Attempt 2: Parse as Python literal
|
|
||||||
try:
|
|
||||||
arguments = ast.literal_eval(tool_input)
|
|
||||||
if isinstance(arguments, dict):
|
|
||||||
return arguments
|
|
||||||
except (ValueError, SyntaxError):
|
|
||||||
pass # Continue to the next parsing attempt
|
|
||||||
|
|
||||||
# Attempt 3: Parse as JSON5
|
|
||||||
try:
|
|
||||||
arguments = json5.loads(tool_input)
|
|
||||||
if isinstance(arguments, dict):
|
|
||||||
return arguments
|
|
||||||
except (JSONDecodeError, ValueError, TypeError):
|
|
||||||
pass # Continue to the next parsing attempt
|
|
||||||
|
|
||||||
# Attempt 4: Repair JSON
|
|
||||||
try:
|
|
||||||
repaired_input = repair_json(tool_input)
|
repaired_input = repair_json(tool_input)
|
||||||
self._printer.print(
|
try:
|
||||||
content=f"Repaired JSON: {repaired_input}", color="blue"
|
arguments = json.loads(repaired_input)
|
||||||
)
|
except json.JSONDecodeError as e:
|
||||||
arguments = json.loads(repaired_input)
|
raise Exception(f"Invalid tool input JSON: {e}")
|
||||||
if isinstance(arguments, dict):
|
|
||||||
return arguments
|
|
||||||
except Exception as e:
|
|
||||||
error = f"Failed to repair JSON: {e}"
|
|
||||||
self._printer.print(content=error, color="red")
|
|
||||||
|
|
||||||
error_message = (
|
return arguments
|
||||||
"Tool input must be a valid dictionary in JSON or Python literal format"
|
|
||||||
)
|
|
||||||
self._emit_validate_input_error(error_message)
|
|
||||||
# If all parsing attempts fail, raise an error
|
|
||||||
raise Exception(error_message)
|
|
||||||
|
|
||||||
def _emit_validate_input_error(self, final_error: str):
|
|
||||||
tool_selection_data = {
|
|
||||||
"agent_key": self.agent.key,
|
|
||||||
"agent_role": self.agent.role,
|
|
||||||
"tool_name": self.action.tool,
|
|
||||||
"tool_args": str(self.action.tool_input),
|
|
||||||
"tool_class": self.__class__.__name__,
|
|
||||||
}
|
|
||||||
|
|
||||||
crewai_event_bus.emit(
|
|
||||||
self,
|
|
||||||
ToolValidateInputErrorEvent(**tool_selection_data, error=final_error),
|
|
||||||
)
|
|
||||||
|
|
||||||
def on_tool_error(self, tool: Any, tool_calling: ToolCalling, e: Exception) -> None:
|
def on_tool_error(self, tool: Any, tool_calling: ToolCalling, e: Exception) -> None:
|
||||||
event_data = self._prepare_event_data(tool, tool_calling)
|
event_data = self._prepare_event_data(tool, tool_calling)
|
||||||
crewai_event_bus.emit(self, ToolUsageErrorEvent(**{**event_data, "error": e}))
|
events.emit(
|
||||||
|
source=self, event=ToolUsageError(**{**event_data, "error": str(e)})
|
||||||
|
)
|
||||||
|
|
||||||
def on_tool_use_finished(
|
def on_tool_use_finished(
|
||||||
self, tool: Any, tool_calling: ToolCalling, from_cache: bool, started_at: float
|
self, tool: Any, tool_calling: ToolCalling, from_cache: bool, started_at: float
|
||||||
@@ -503,7 +448,7 @@ class ToolUsage:
|
|||||||
"from_cache": from_cache,
|
"from_cache": from_cache,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
crewai_event_bus.emit(self, ToolUsageFinishedEvent(**event_data))
|
events.emit(source=self, event=ToolUsageFinished(**event_data))
|
||||||
|
|
||||||
def _prepare_event_data(self, tool: Any, tool_calling: ToolCalling) -> dict:
|
def _prepare_event_data(self, tool: Any, tool_calling: ToolCalling) -> dict:
|
||||||
return {
|
return {
|
||||||
|
|||||||
24
src/crewai/tools/tool_usage_events.py
Normal file
24
src/crewai/tools/tool_usage_events.py
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
from datetime import datetime
|
||||||
|
from typing import Any, Dict
|
||||||
|
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
|
||||||
|
class ToolUsageEvent(BaseModel):
|
||||||
|
agent_key: str
|
||||||
|
agent_role: str
|
||||||
|
tool_name: str
|
||||||
|
tool_args: Dict[str, Any]
|
||||||
|
tool_class: str
|
||||||
|
run_attempts: int | None = None
|
||||||
|
delegations: int | None = None
|
||||||
|
|
||||||
|
|
||||||
|
class ToolUsageFinished(ToolUsageEvent):
|
||||||
|
started_at: datetime
|
||||||
|
finished_at: datetime
|
||||||
|
from_cache: bool = False
|
||||||
|
|
||||||
|
|
||||||
|
class ToolUsageError(ToolUsageEvent):
|
||||||
|
error: str
|
||||||
@@ -15,7 +15,7 @@
|
|||||||
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
|
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
|
||||||
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
|
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
|
||||||
"task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
|
"task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
|
||||||
"expected_output": "\nThis is the expected criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
|
"expected_output": "\nThis is the expect criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
|
||||||
"human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
|
"human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
|
||||||
"getting_input": "This is the agent's final answer: {final_answer}\n\n",
|
"getting_input": "This is the agent's final answer: {final_answer}\n\n",
|
||||||
"summarizer_system_message": "You are a helpful assistant that summarizes text.",
|
"summarizer_system_message": "You are a helpful assistant that summarizes text.",
|
||||||
@@ -23,8 +23,8 @@
|
|||||||
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
|
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
|
||||||
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
|
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
|
||||||
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
|
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
|
||||||
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
|
"human_feedback_classification": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with 'True' if further changes are needed, or 'False' if the user is satisfied. **Important** Do not include any additional commentary outside of your 'True' or 'False' response.\n\nFeedback: \"{feedback}\"",
|
||||||
"feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary."
|
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals."
|
||||||
},
|
},
|
||||||
"errors": {
|
"errors": {
|
||||||
"force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
|
"force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
|
||||||
@@ -39,11 +39,11 @@
|
|||||||
"validation_error": "### Previous attempt failed validation: {guardrail_result_error}\n\n\n### Previous result:\n{task_output}\n\n\nTry again, making sure to address the validation error."
|
"validation_error": "### Previous attempt failed validation: {guardrail_result_error}\n\n\n### Previous result:\n{task_output}\n\n\nTry again, making sure to address the validation error."
|
||||||
},
|
},
|
||||||
"tools": {
|
"tools": {
|
||||||
"delegate_work": "Delegate a specific task to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to execute the task, they know nothing about the task, so share absolutely everything you know, don't reference things but instead explain them.",
|
"delegate_work": "Delegate a specific task to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to execute the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
|
||||||
"ask_question": "Ask a specific question to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolutely everything you know, don't reference things but instead explain them.",
|
"ask_question": "Ask a specific question to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolute everything you know, don't reference things but instead explain them.",
|
||||||
"add_image": {
|
"add_image": {
|
||||||
"name": "Add image to content",
|
"name": "Add image to content",
|
||||||
"description": "See image to understand its content, you can optionally ask a question about the image",
|
"description": "See image to understand it's content, you can optionally ask a question about the image",
|
||||||
"default_action": "Please provide a detailed description of this image, including all visual elements, context, and any notable details you can observe."
|
"default_action": "Please provide a detailed description of this image, including all visual elements, context, and any notable details you can observe."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,4 +4,3 @@ DEFAULT_SCORE_THRESHOLD = 0.35
|
|||||||
KNOWLEDGE_DIRECTORY = "knowledge"
|
KNOWLEDGE_DIRECTORY = "knowledge"
|
||||||
MAX_LLM_RETRY = 3
|
MAX_LLM_RETRY = 3
|
||||||
MAX_FILE_NAME_LENGTH = 255
|
MAX_FILE_NAME_LENGTH = 255
|
||||||
EMITTER_COLOR = "bold_blue"
|
|
||||||
|
|||||||
@@ -20,52 +20,23 @@ class ConverterError(Exception):
|
|||||||
class Converter(OutputConverter):
|
class Converter(OutputConverter):
|
||||||
"""Class that converts text into either pydantic or json."""
|
"""Class that converts text into either pydantic or json."""
|
||||||
|
|
||||||
def to_pydantic(self, current_attempt=1) -> BaseModel:
|
def to_pydantic(self, current_attempt=1):
|
||||||
"""Convert text to pydantic."""
|
"""Convert text to pydantic."""
|
||||||
try:
|
try:
|
||||||
if self.llm.supports_function_calling():
|
if self.llm.supports_function_calling():
|
||||||
result = self._create_instructor().to_pydantic()
|
return self._create_instructor().to_pydantic()
|
||||||
else:
|
else:
|
||||||
response = self.llm.call(
|
return self.llm.call(
|
||||||
[
|
[
|
||||||
{"role": "system", "content": self.instructions},
|
{"role": "system", "content": self.instructions},
|
||||||
{"role": "user", "content": self.text},
|
{"role": "user", "content": self.text},
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
try:
|
|
||||||
# Try to directly validate the response JSON
|
|
||||||
result = self.model.model_validate_json(response)
|
|
||||||
except ValidationError:
|
|
||||||
# If direct validation fails, attempt to extract valid JSON
|
|
||||||
result = handle_partial_json(response, self.model, False, None)
|
|
||||||
# Ensure result is a BaseModel instance
|
|
||||||
if not isinstance(result, BaseModel):
|
|
||||||
if isinstance(result, dict):
|
|
||||||
result = self.model.parse_obj(result)
|
|
||||||
elif isinstance(result, str):
|
|
||||||
try:
|
|
||||||
parsed = json.loads(result)
|
|
||||||
result = self.model.parse_obj(parsed)
|
|
||||||
except Exception as parse_err:
|
|
||||||
raise ConverterError(
|
|
||||||
f"Failed to convert partial JSON result into Pydantic: {parse_err}"
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
raise ConverterError(
|
|
||||||
"handle_partial_json returned an unexpected type."
|
|
||||||
)
|
|
||||||
return result
|
|
||||||
except ValidationError as e:
|
|
||||||
if current_attempt < self.max_attempts:
|
|
||||||
return self.to_pydantic(current_attempt + 1)
|
|
||||||
raise ConverterError(
|
|
||||||
f"Failed to convert text into a Pydantic model due to validation error: {e}"
|
|
||||||
)
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if current_attempt < self.max_attempts:
|
if current_attempt < self.max_attempts:
|
||||||
return self.to_pydantic(current_attempt + 1)
|
return self.to_pydantic(current_attempt + 1)
|
||||||
raise ConverterError(
|
return ConverterError(
|
||||||
f"Failed to convert text into a Pydantic model due to error: {e}"
|
f"Failed to convert text into a pydantic model due to the following error: {e}"
|
||||||
)
|
)
|
||||||
|
|
||||||
def to_json(self, current_attempt=1):
|
def to_json(self, current_attempt=1):
|
||||||
@@ -95,6 +66,7 @@ class Converter(OutputConverter):
|
|||||||
llm=self.llm,
|
llm=self.llm,
|
||||||
model=self.model,
|
model=self.model,
|
||||||
content=self.text,
|
content=self.text,
|
||||||
|
instructions=self.instructions,
|
||||||
)
|
)
|
||||||
return inst
|
return inst
|
||||||
|
|
||||||
@@ -215,19 +187,10 @@ def convert_with_instructions(
|
|||||||
|
|
||||||
|
|
||||||
def get_conversion_instructions(model: Type[BaseModel], llm: Any) -> str:
|
def get_conversion_instructions(model: Type[BaseModel], llm: Any) -> str:
|
||||||
instructions = "Please convert the following text into valid JSON."
|
instructions = "I'm gonna convert this raw text into valid JSON."
|
||||||
if llm.supports_function_calling():
|
if llm.supports_function_calling():
|
||||||
model_schema = PydanticSchemaParser(model=model).get_schema()
|
model_schema = PydanticSchemaParser(model=model).get_schema()
|
||||||
instructions += (
|
instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
|
||||||
f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
|
|
||||||
f"The JSON must follow this schema exactly:\n```json\n{model_schema}\n```"
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
model_description = generate_model_description(model)
|
|
||||||
instructions += (
|
|
||||||
f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
|
|
||||||
f"The JSON must follow this format exactly:\n{model_description}"
|
|
||||||
)
|
|
||||||
return instructions
|
return instructions
|
||||||
|
|
||||||
|
|
||||||
@@ -267,13 +230,9 @@ def generate_model_description(model: Type[BaseModel]) -> str:
|
|||||||
origin = get_origin(field_type)
|
origin = get_origin(field_type)
|
||||||
args = get_args(field_type)
|
args = get_args(field_type)
|
||||||
|
|
||||||
if origin is Union or (origin is None and len(args) > 0):
|
if origin is Union and type(None) in args:
|
||||||
# Handle both Union and the new '|' syntax
|
|
||||||
non_none_args = [arg for arg in args if arg is not type(None)]
|
non_none_args = [arg for arg in args if arg is not type(None)]
|
||||||
if len(non_none_args) == 1:
|
return f"Optional[{describe_field(non_none_args[0])}]"
|
||||||
return f"Optional[{describe_field(non_none_args[0])}]"
|
|
||||||
else:
|
|
||||||
return f"Optional[Union[{', '.join(describe_field(arg) for arg in non_none_args)}]]"
|
|
||||||
elif origin is list:
|
elif origin is list:
|
||||||
return f"List[{describe_field(args[0])}]"
|
return f"List[{describe_field(args[0])}]"
|
||||||
elif origin is dict:
|
elif origin is dict:
|
||||||
@@ -282,10 +241,8 @@ def generate_model_description(model: Type[BaseModel]) -> str:
|
|||||||
return f"Dict[{key_type}, {value_type}]"
|
return f"Dict[{key_type}, {value_type}]"
|
||||||
elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
|
elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
|
||||||
return generate_model_description(field_type)
|
return generate_model_description(field_type)
|
||||||
elif hasattr(field_type, "__name__"):
|
|
||||||
return field_type.__name__
|
|
||||||
else:
|
else:
|
||||||
return str(field_type)
|
return field_type.__name__
|
||||||
|
|
||||||
fields = model.__annotations__
|
fields = model.__annotations__
|
||||||
field_descriptions = [
|
field_descriptions = [
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import os
|
import os
|
||||||
from typing import Any, Dict, Optional, cast
|
from typing import Any, Dict, cast
|
||||||
|
|
||||||
from chromadb import Documents, EmbeddingFunction, Embeddings
|
from chromadb import Documents, EmbeddingFunction, Embeddings
|
||||||
from chromadb.api.types import validate_embedding_function
|
from chromadb.api.types import validate_embedding_function
|
||||||
@@ -14,16 +14,14 @@ class EmbeddingConfigurator:
|
|||||||
"vertexai": self._configure_vertexai,
|
"vertexai": self._configure_vertexai,
|
||||||
"google": self._configure_google,
|
"google": self._configure_google,
|
||||||
"cohere": self._configure_cohere,
|
"cohere": self._configure_cohere,
|
||||||
"voyageai": self._configure_voyageai,
|
|
||||||
"bedrock": self._configure_bedrock,
|
"bedrock": self._configure_bedrock,
|
||||||
"huggingface": self._configure_huggingface,
|
"huggingface": self._configure_huggingface,
|
||||||
"watson": self._configure_watson,
|
"watson": self._configure_watson,
|
||||||
"custom": self._configure_custom,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
def configure_embedder(
|
def configure_embedder(
|
||||||
self,
|
self,
|
||||||
embedder_config: Optional[Dict[str, Any]] = None,
|
embedder_config: Dict[str, Any] | None = None,
|
||||||
) -> EmbeddingFunction:
|
) -> EmbeddingFunction:
|
||||||
"""Configures and returns an embedding function based on the provided config."""
|
"""Configures and returns an embedding function based on the provided config."""
|
||||||
if embedder_config is None:
|
if embedder_config is None:
|
||||||
@@ -31,19 +29,21 @@ class EmbeddingConfigurator:
|
|||||||
|
|
||||||
provider = embedder_config.get("provider")
|
provider = embedder_config.get("provider")
|
||||||
config = embedder_config.get("config", {})
|
config = embedder_config.get("config", {})
|
||||||
model_name = config.get("model") if provider != "custom" else None
|
model_name = config.get("model")
|
||||||
|
|
||||||
|
if isinstance(provider, EmbeddingFunction):
|
||||||
|
try:
|
||||||
|
validate_embedding_function(provider)
|
||||||
|
return provider
|
||||||
|
except Exception as e:
|
||||||
|
raise ValueError(f"Invalid custom embedding function: {str(e)}")
|
||||||
|
|
||||||
if provider not in self.embedding_functions:
|
if provider not in self.embedding_functions:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
f"Unsupported embedding provider: {provider}, supported providers: {list(self.embedding_functions.keys())}"
|
f"Unsupported embedding provider: {provider}, supported providers: {list(self.embedding_functions.keys())}"
|
||||||
)
|
)
|
||||||
|
|
||||||
embedding_function = self.embedding_functions[provider]
|
return self.embedding_functions[provider](config, model_name)
|
||||||
return (
|
|
||||||
embedding_function(config)
|
|
||||||
if provider == "custom"
|
|
||||||
else embedding_function(config, model_name)
|
|
||||||
)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _create_default_embedding_function():
|
def _create_default_embedding_function():
|
||||||
@@ -64,13 +64,6 @@ class EmbeddingConfigurator:
|
|||||||
return OpenAIEmbeddingFunction(
|
return OpenAIEmbeddingFunction(
|
||||||
api_key=config.get("api_key") or os.getenv("OPENAI_API_KEY"),
|
api_key=config.get("api_key") or os.getenv("OPENAI_API_KEY"),
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
api_base=config.get("api_base", None),
|
|
||||||
api_type=config.get("api_type", None),
|
|
||||||
api_version=config.get("api_version", None),
|
|
||||||
default_headers=config.get("default_headers", None),
|
|
||||||
dimensions=config.get("dimensions", None),
|
|
||||||
deployment_id=config.get("deployment_id", None),
|
|
||||||
organization_id=config.get("organization_id", None),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -85,10 +78,6 @@ class EmbeddingConfigurator:
|
|||||||
api_type=config.get("api_type", "azure"),
|
api_type=config.get("api_type", "azure"),
|
||||||
api_version=config.get("api_version"),
|
api_version=config.get("api_version"),
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
default_headers=config.get("default_headers"),
|
|
||||||
dimensions=config.get("dimensions"),
|
|
||||||
deployment_id=config.get("deployment_id"),
|
|
||||||
organization_id=config.get("organization_id"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -111,8 +100,6 @@ class EmbeddingConfigurator:
|
|||||||
return GoogleVertexEmbeddingFunction(
|
return GoogleVertexEmbeddingFunction(
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
api_key=config.get("api_key"),
|
api_key=config.get("api_key"),
|
||||||
project_id=config.get("project_id"),
|
|
||||||
region=config.get("region"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -124,7 +111,6 @@ class EmbeddingConfigurator:
|
|||||||
return GoogleGenerativeAiEmbeddingFunction(
|
return GoogleGenerativeAiEmbeddingFunction(
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
api_key=config.get("api_key"),
|
api_key=config.get("api_key"),
|
||||||
task_type=config.get("task_type"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -138,28 +124,15 @@ class EmbeddingConfigurator:
|
|||||||
api_key=config.get("api_key"),
|
api_key=config.get("api_key"),
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _configure_voyageai(config, model_name):
|
|
||||||
from chromadb.utils.embedding_functions.voyageai_embedding_function import (
|
|
||||||
VoyageAIEmbeddingFunction,
|
|
||||||
)
|
|
||||||
|
|
||||||
return VoyageAIEmbeddingFunction(
|
|
||||||
model_name=model_name,
|
|
||||||
api_key=config.get("api_key"),
|
|
||||||
)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_bedrock(config, model_name):
|
def _configure_bedrock(config, model_name):
|
||||||
from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
|
from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
|
||||||
AmazonBedrockEmbeddingFunction,
|
AmazonBedrockEmbeddingFunction,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Allow custom model_name override with backwards compatibility
|
return AmazonBedrockEmbeddingFunction(
|
||||||
kwargs = {"session": config.get("session")}
|
session=config.get("session"),
|
||||||
if model_name is not None:
|
)
|
||||||
kwargs["model_name"] = model_name
|
|
||||||
return AmazonBedrockEmbeddingFunction(**kwargs)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_huggingface(config, model_name):
|
def _configure_huggingface(config, model_name):
|
||||||
@@ -209,28 +182,3 @@ class EmbeddingConfigurator:
|
|||||||
raise e
|
raise e
|
||||||
|
|
||||||
return WatsonEmbeddingFunction()
|
return WatsonEmbeddingFunction()
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _configure_custom(config):
|
|
||||||
custom_embedder = config.get("embedder")
|
|
||||||
if isinstance(custom_embedder, EmbeddingFunction):
|
|
||||||
try:
|
|
||||||
validate_embedding_function(custom_embedder)
|
|
||||||
return custom_embedder
|
|
||||||
except Exception as e:
|
|
||||||
raise ValueError(f"Invalid custom embedding function: {str(e)}")
|
|
||||||
elif callable(custom_embedder):
|
|
||||||
try:
|
|
||||||
instance = custom_embedder()
|
|
||||||
if isinstance(instance, EmbeddingFunction):
|
|
||||||
validate_embedding_function(instance)
|
|
||||||
return instance
|
|
||||||
raise ValueError(
|
|
||||||
"Custom embedder does not create an EmbeddingFunction instance"
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
raise ValueError(f"Error instantiating custom embedder: {str(e)}")
|
|
||||||
else:
|
|
||||||
raise ValueError(
|
|
||||||
"Custom embedder must be an instance of `EmbeddingFunction` or a callable that creates one"
|
|
||||||
)
|
|
||||||
|
|||||||
@@ -1,39 +0,0 @@
|
|||||||
"""Error message definitions for CrewAI database operations."""
|
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
|
|
||||||
class DatabaseOperationError(Exception):
|
|
||||||
"""Base exception class for database operation errors."""
|
|
||||||
|
|
||||||
def __init__(self, message: str, original_error: Optional[Exception] = None):
|
|
||||||
"""Initialize the database operation error.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
message: The error message to display
|
|
||||||
original_error: The original exception that caused this error, if any
|
|
||||||
"""
|
|
||||||
super().__init__(message)
|
|
||||||
self.original_error = original_error
|
|
||||||
|
|
||||||
|
|
||||||
class DatabaseError:
|
|
||||||
"""Standardized error message templates for database operations."""
|
|
||||||
|
|
||||||
INIT_ERROR: str = "Database initialization error: {}"
|
|
||||||
SAVE_ERROR: str = "Error saving task outputs: {}"
|
|
||||||
UPDATE_ERROR: str = "Error updating task outputs: {}"
|
|
||||||
LOAD_ERROR: str = "Error loading task outputs: {}"
|
|
||||||
DELETE_ERROR: str = "Error deleting task outputs: {}"
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def format_error(cls, template: str, error: Exception) -> str:
|
|
||||||
"""Format an error message with the given template and error.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
template: The error message template to use
|
|
||||||
error: The exception to format into the template
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The formatted error message
|
|
||||||
"""
|
|
||||||
return template.format(str(error))
|
|
||||||
@@ -1,12 +1,11 @@
|
|||||||
from collections import defaultdict
|
from collections import defaultdict
|
||||||
|
|
||||||
from pydantic import BaseModel, Field, InstanceOf
|
from pydantic import BaseModel, Field
|
||||||
from rich.box import HEAVY_EDGE
|
from rich.box import HEAVY_EDGE
|
||||||
from rich.console import Console
|
from rich.console import Console
|
||||||
from rich.table import Table
|
from rich.table import Table
|
||||||
|
|
||||||
from crewai.agent import Agent
|
from crewai.agent import Agent
|
||||||
from crewai.llm import LLM
|
|
||||||
from crewai.task import Task
|
from crewai.task import Task
|
||||||
from crewai.tasks.task_output import TaskOutput
|
from crewai.tasks.task_output import TaskOutput
|
||||||
from crewai.telemetry import Telemetry
|
from crewai.telemetry import Telemetry
|
||||||
@@ -24,7 +23,7 @@ class CrewEvaluator:
|
|||||||
|
|
||||||
Attributes:
|
Attributes:
|
||||||
crew (Crew): The crew of agents to evaluate.
|
crew (Crew): The crew of agents to evaluate.
|
||||||
eval_llm (LLM): Language model instance to use for evaluations
|
openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
|
||||||
tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
|
tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
|
||||||
iteration (int): The current iteration of the evaluation.
|
iteration (int): The current iteration of the evaluation.
|
||||||
"""
|
"""
|
||||||
@@ -33,9 +32,9 @@ class CrewEvaluator:
|
|||||||
run_execution_times: defaultdict = defaultdict(list)
|
run_execution_times: defaultdict = defaultdict(list)
|
||||||
iteration: int = 0
|
iteration: int = 0
|
||||||
|
|
||||||
def __init__(self, crew, eval_llm: InstanceOf[LLM]):
|
def __init__(self, crew, openai_model_name: str):
|
||||||
self.crew = crew
|
self.crew = crew
|
||||||
self.llm = eval_llm
|
self.openai_model_name = openai_model_name
|
||||||
self._telemetry = Telemetry()
|
self._telemetry = Telemetry()
|
||||||
self._setup_for_evaluating()
|
self._setup_for_evaluating()
|
||||||
|
|
||||||
@@ -52,7 +51,7 @@ class CrewEvaluator:
|
|||||||
),
|
),
|
||||||
backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
|
backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
|
||||||
verbose=False,
|
verbose=False,
|
||||||
llm=self.llm,
|
llm=self.openai_model_name,
|
||||||
)
|
)
|
||||||
|
|
||||||
def _evaluation_task(
|
def _evaluation_task(
|
||||||
@@ -182,7 +181,7 @@ class CrewEvaluator:
|
|||||||
self.crew,
|
self.crew,
|
||||||
evaluation_result.pydantic.quality,
|
evaluation_result.pydantic.quality,
|
||||||
current_task.execution_duration,
|
current_task.execution_duration,
|
||||||
self.llm.model,
|
self.openai_model_name,
|
||||||
)
|
)
|
||||||
self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
|
self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
|
||||||
self.run_execution_times[self.iteration].append(
|
self.run_execution_times[self.iteration].append(
|
||||||
|
|||||||
@@ -3,9 +3,19 @@ from typing import List
|
|||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
from crewai.utilities import Converter
|
from crewai.utilities import Converter
|
||||||
from crewai.utilities.events import TaskEvaluationEvent, crewai_event_bus
|
|
||||||
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
|
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
|
||||||
|
|
||||||
|
agentops = None
|
||||||
|
try:
|
||||||
|
from agentops import track_agent # type: ignore
|
||||||
|
except ImportError:
|
||||||
|
|
||||||
|
def track_agent(name):
|
||||||
|
def noop(f):
|
||||||
|
return f
|
||||||
|
|
||||||
|
return noop
|
||||||
|
|
||||||
|
|
||||||
class Entity(BaseModel):
|
class Entity(BaseModel):
|
||||||
name: str = Field(description="The name of the entity.")
|
name: str = Field(description="The name of the entity.")
|
||||||
@@ -38,15 +48,12 @@ class TrainingTaskEvaluation(BaseModel):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@track_agent(name="Task Evaluator")
|
||||||
class TaskEvaluator:
|
class TaskEvaluator:
|
||||||
def __init__(self, original_agent):
|
def __init__(self, original_agent):
|
||||||
self.llm = original_agent.llm
|
self.llm = original_agent.llm
|
||||||
self.original_agent = original_agent
|
|
||||||
|
|
||||||
def evaluate(self, task, output) -> TaskEvaluation:
|
def evaluate(self, task, output) -> TaskEvaluation:
|
||||||
crewai_event_bus.emit(
|
|
||||||
self, TaskEvaluationEvent(evaluation_type="task_evaluation")
|
|
||||||
)
|
|
||||||
evaluation_query = (
|
evaluation_query = (
|
||||||
f"Assess the quality of the task completed based on the description, expected output, and actual results.\n\n"
|
f"Assess the quality of the task completed based on the description, expected output, and actual results.\n\n"
|
||||||
f"Task Description:\n{task.description}\n\n"
|
f"Task Description:\n{task.description}\n\n"
|
||||||
@@ -83,39 +90,15 @@ class TaskEvaluator:
|
|||||||
- training_data (dict): The training data to be evaluated.
|
- training_data (dict): The training data to be evaluated.
|
||||||
- agent_id (str): The ID of the agent.
|
- agent_id (str): The ID of the agent.
|
||||||
"""
|
"""
|
||||||
crewai_event_bus.emit(
|
|
||||||
self, TaskEvaluationEvent(evaluation_type="training_data_evaluation")
|
|
||||||
)
|
|
||||||
|
|
||||||
output_training_data = training_data[agent_id]
|
output_training_data = training_data[agent_id]
|
||||||
|
|
||||||
final_aggregated_data = ""
|
final_aggregated_data = ""
|
||||||
|
for _, data in output_training_data.items():
|
||||||
for iteration, data in output_training_data.items():
|
|
||||||
improved_output = data.get("improved_output")
|
|
||||||
initial_output = data.get("initial_output")
|
|
||||||
human_feedback = data.get("human_feedback")
|
|
||||||
|
|
||||||
if not all([improved_output, initial_output, human_feedback]):
|
|
||||||
missing_fields = [
|
|
||||||
field
|
|
||||||
for field in ["improved_output", "initial_output", "human_feedback"]
|
|
||||||
if not data.get(field)
|
|
||||||
]
|
|
||||||
error_msg = (
|
|
||||||
f"Critical training data error: Missing fields ({', '.join(missing_fields)}) "
|
|
||||||
f"for agent {agent_id} in iteration {iteration}.\n"
|
|
||||||
"This indicates a broken training process. "
|
|
||||||
"Cannot proceed with evaluation.\n"
|
|
||||||
"Please check your training implementation."
|
|
||||||
)
|
|
||||||
raise ValueError(error_msg)
|
|
||||||
|
|
||||||
final_aggregated_data += (
|
final_aggregated_data += (
|
||||||
f"Iteration: {iteration}\n"
|
f"Initial Output:\n{data['initial_output']}\n\n"
|
||||||
f"Initial Output:\n{initial_output}\n\n"
|
f"Human Feedback:\n{data['human_feedback']}\n\n"
|
||||||
f"Human Feedback:\n{human_feedback}\n\n"
|
f"Improved Output:\n{data['improved_output']}\n\n"
|
||||||
f"Improved Output:\n{improved_output}\n\n"
|
|
||||||
"------------------------------------------------\n\n"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
evaluation_query = (
|
evaluation_query = (
|
||||||
|
|||||||
44
src/crewai/utilities/events.py
Normal file
44
src/crewai/utilities/events.py
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
from functools import wraps
|
||||||
|
from typing import Any, Callable, Dict, Generic, List, Type, TypeVar
|
||||||
|
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
T = TypeVar("T")
|
||||||
|
EVT = TypeVar("EVT", bound=BaseModel)
|
||||||
|
|
||||||
|
|
||||||
|
class Emitter(Generic[T, EVT]):
|
||||||
|
_listeners: Dict[Type[EVT], List[Callable]] = {}
|
||||||
|
|
||||||
|
def on(self, event_type: Type[EVT]):
|
||||||
|
def decorator(func: Callable):
|
||||||
|
@wraps(func)
|
||||||
|
def wrapper(*args, **kwargs):
|
||||||
|
return func(*args, **kwargs)
|
||||||
|
|
||||||
|
self._listeners.setdefault(event_type, []).append(wrapper)
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
return decorator
|
||||||
|
|
||||||
|
def emit(self, source: T, event: EVT) -> None:
|
||||||
|
event_type = type(event)
|
||||||
|
for func in self._listeners.get(event_type, []):
|
||||||
|
func(source, event)
|
||||||
|
|
||||||
|
|
||||||
|
default_emitter = Emitter[Any, BaseModel]()
|
||||||
|
|
||||||
|
|
||||||
|
def emit(source: Any, event: BaseModel, raise_on_error: bool = False) -> None:
|
||||||
|
try:
|
||||||
|
default_emitter.emit(source, event)
|
||||||
|
except Exception as e:
|
||||||
|
if raise_on_error:
|
||||||
|
raise e
|
||||||
|
else:
|
||||||
|
print(f"Error emitting event: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
def on(event_type: Type[BaseModel]) -> Callable:
|
||||||
|
return default_emitter.on(event_type)
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
from .crew_events import (
|
|
||||||
CrewKickoffStartedEvent,
|
|
||||||
CrewKickoffCompletedEvent,
|
|
||||||
CrewKickoffFailedEvent,
|
|
||||||
CrewTrainStartedEvent,
|
|
||||||
CrewTrainCompletedEvent,
|
|
||||||
CrewTrainFailedEvent,
|
|
||||||
CrewTestStartedEvent,
|
|
||||||
CrewTestCompletedEvent,
|
|
||||||
CrewTestFailedEvent,
|
|
||||||
)
|
|
||||||
from .agent_events import (
|
|
||||||
AgentExecutionStartedEvent,
|
|
||||||
AgentExecutionCompletedEvent,
|
|
||||||
AgentExecutionErrorEvent,
|
|
||||||
)
|
|
||||||
from .task_events import TaskStartedEvent, TaskCompletedEvent, TaskFailedEvent, TaskEvaluationEvent
|
|
||||||
from .flow_events import (
|
|
||||||
FlowCreatedEvent,
|
|
||||||
FlowStartedEvent,
|
|
||||||
FlowFinishedEvent,
|
|
||||||
FlowPlotEvent,
|
|
||||||
MethodExecutionStartedEvent,
|
|
||||||
MethodExecutionFinishedEvent,
|
|
||||||
MethodExecutionFailedEvent,
|
|
||||||
)
|
|
||||||
from .crewai_event_bus import CrewAIEventsBus, crewai_event_bus
|
|
||||||
from .tool_usage_events import (
|
|
||||||
ToolUsageFinishedEvent,
|
|
||||||
ToolUsageErrorEvent,
|
|
||||||
ToolUsageStartedEvent,
|
|
||||||
ToolExecutionErrorEvent,
|
|
||||||
ToolSelectionErrorEvent,
|
|
||||||
ToolUsageEvent,
|
|
||||||
ToolValidateInputErrorEvent,
|
|
||||||
)
|
|
||||||
from .llm_events import LLMCallCompletedEvent, LLMCallFailedEvent, LLMCallStartedEvent
|
|
||||||
|
|
||||||
# events
|
|
||||||
from .event_listener import EventListener
|
|
||||||
from .third_party.agentops_listener import agentops_listener
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Union
|
|
||||||
|
|
||||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
|
||||||
from crewai.tools.base_tool import BaseTool
|
|
||||||
from crewai.tools.structured_tool import CrewStructuredTool
|
|
||||||
|
|
||||||
from .base_events import CrewEvent
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
|
||||||
|
|
||||||
|
|
||||||
class AgentExecutionStartedEvent(CrewEvent):
|
|
||||||
"""Event emitted when an agent starts executing a task"""
|
|
||||||
|
|
||||||
agent: BaseAgent
|
|
||||||
task: Any
|
|
||||||
tools: Optional[Sequence[Union[BaseTool, CrewStructuredTool]]]
|
|
||||||
task_prompt: str
|
|
||||||
type: str = "agent_execution_started"
|
|
||||||
|
|
||||||
model_config = {"arbitrary_types_allowed": True}
|
|
||||||
|
|
||||||
|
|
||||||
class AgentExecutionCompletedEvent(CrewEvent):
|
|
||||||
"""Event emitted when an agent completes executing a task"""
|
|
||||||
|
|
||||||
agent: BaseAgent
|
|
||||||
task: Any
|
|
||||||
output: str
|
|
||||||
type: str = "agent_execution_completed"
|
|
||||||
|
|
||||||
|
|
||||||
class AgentExecutionErrorEvent(CrewEvent):
|
|
||||||
"""Event emitted when an agent encounters an error during execution"""
|
|
||||||
|
|
||||||
agent: BaseAgent
|
|
||||||
task: Any
|
|
||||||
error: str
|
|
||||||
type: str = "agent_execution_error"
|
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
from abc import ABC, abstractmethod
|
|
||||||
from logging import Logger
|
|
||||||
|
|
||||||
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus, crewai_event_bus
|
|
||||||
|
|
||||||
|
|
||||||
class BaseEventListener(ABC):
|
|
||||||
def __init__(self):
|
|
||||||
super().__init__()
|
|
||||||
self.setup_listeners(crewai_event_bus)
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def setup_listeners(self, crewai_event_bus: CrewAIEventsBus):
|
|
||||||
pass
|
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
from datetime import datetime
|
|
||||||
|
|
||||||
from pydantic import BaseModel, Field
|
|
||||||
|
|
||||||
|
|
||||||
class CrewEvent(BaseModel):
|
|
||||||
"""Base class for all crew events"""
|
|
||||||
|
|
||||||
timestamp: datetime = Field(default_factory=datetime.now)
|
|
||||||
type: str
|
|
||||||
@@ -1,81 +0,0 @@
|
|||||||
from typing import Any, Dict, Optional, Union
|
|
||||||
|
|
||||||
from pydantic import InstanceOf
|
|
||||||
|
|
||||||
from crewai.utilities.events.base_events import CrewEvent
|
|
||||||
|
|
||||||
|
|
||||||
class CrewKickoffStartedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a crew starts execution"""
|
|
||||||
|
|
||||||
crew_name: Optional[str]
|
|
||||||
inputs: Optional[Dict[str, Any]]
|
|
||||||
type: str = "crew_kickoff_started"
|
|
||||||
|
|
||||||
|
|
||||||
class CrewKickoffCompletedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a crew completes execution"""
|
|
||||||
|
|
||||||
crew_name: Optional[str]
|
|
||||||
output: Any
|
|
||||||
type: str = "crew_kickoff_completed"
|
|
||||||
|
|
||||||
|
|
||||||
class CrewKickoffFailedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a crew fails to complete execution"""
|
|
||||||
|
|
||||||
error: str
|
|
||||||
crew_name: Optional[str]
|
|
||||||
type: str = "crew_kickoff_failed"
|
|
||||||
|
|
||||||
|
|
||||||
class CrewTrainStartedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a crew starts training"""
|
|
||||||
|
|
||||||
crew_name: Optional[str]
|
|
||||||
n_iterations: int
|
|
||||||
filename: str
|
|
||||||
inputs: Optional[Dict[str, Any]]
|
|
||||||
type: str = "crew_train_started"
|
|
||||||
|
|
||||||
|
|
||||||
class CrewTrainCompletedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a crew completes training"""
|
|
||||||
|
|
||||||
crew_name: Optional[str]
|
|
||||||
n_iterations: int
|
|
||||||
filename: str
|
|
||||||
type: str = "crew_train_completed"
|
|
||||||
|
|
||||||
|
|
||||||
class CrewTrainFailedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a crew fails to complete training"""
|
|
||||||
|
|
||||||
error: str
|
|
||||||
crew_name: Optional[str]
|
|
||||||
type: str = "crew_train_failed"
|
|
||||||
|
|
||||||
|
|
||||||
class CrewTestStartedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a crew starts testing"""
|
|
||||||
|
|
||||||
crew_name: Optional[str]
|
|
||||||
n_iterations: int
|
|
||||||
eval_llm: Optional[Union[str, Any]]
|
|
||||||
inputs: Optional[Dict[str, Any]]
|
|
||||||
type: str = "crew_test_started"
|
|
||||||
|
|
||||||
|
|
||||||
class CrewTestCompletedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a crew completes testing"""
|
|
||||||
|
|
||||||
crew_name: Optional[str]
|
|
||||||
type: str = "crew_test_completed"
|
|
||||||
|
|
||||||
|
|
||||||
class CrewTestFailedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a crew fails to complete testing"""
|
|
||||||
|
|
||||||
error: str
|
|
||||||
crew_name: Optional[str]
|
|
||||||
type: str = "crew_test_failed"
|
|
||||||
@@ -1,113 +0,0 @@
|
|||||||
import threading
|
|
||||||
from contextlib import contextmanager
|
|
||||||
from typing import Any, Callable, Dict, List, Type, TypeVar, cast
|
|
||||||
|
|
||||||
from blinker import Signal
|
|
||||||
|
|
||||||
from crewai.utilities.events.base_events import CrewEvent
|
|
||||||
from crewai.utilities.events.event_types import EventTypes
|
|
||||||
|
|
||||||
EventT = TypeVar("EventT", bound=CrewEvent)
|
|
||||||
|
|
||||||
|
|
||||||
class CrewAIEventsBus:
|
|
||||||
"""
|
|
||||||
A singleton event bus that uses blinker signals for event handling.
|
|
||||||
Allows both internal (Flow/Crew) and external event handling.
|
|
||||||
"""
|
|
||||||
|
|
||||||
_instance = None
|
|
||||||
_lock = threading.Lock()
|
|
||||||
|
|
||||||
def __new__(cls):
|
|
||||||
if cls._instance is None:
|
|
||||||
with cls._lock:
|
|
||||||
if cls._instance is None: # prevent race condition
|
|
||||||
cls._instance = super(CrewAIEventsBus, cls).__new__(cls)
|
|
||||||
cls._instance._initialize()
|
|
||||||
return cls._instance
|
|
||||||
|
|
||||||
def _initialize(self) -> None:
|
|
||||||
"""Initialize the event bus internal state"""
|
|
||||||
self._signal = Signal("crewai_event_bus")
|
|
||||||
self._handlers: Dict[Type[CrewEvent], List[Callable]] = {}
|
|
||||||
|
|
||||||
def on(
|
|
||||||
self, event_type: Type[EventT]
|
|
||||||
) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
|
|
||||||
"""
|
|
||||||
Decorator to register an event handler for a specific event type.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
@crewai_event_bus.on(AgentExecutionCompletedEvent)
|
|
||||||
def on_agent_execution_completed(
|
|
||||||
source: Any, event: AgentExecutionCompletedEvent
|
|
||||||
):
|
|
||||||
print(f"👍 Agent '{event.agent}' completed task")
|
|
||||||
print(f" Output: {event.output}")
|
|
||||||
"""
|
|
||||||
|
|
||||||
def decorator(
|
|
||||||
handler: Callable[[Any, EventT], None],
|
|
||||||
) -> Callable[[Any, EventT], None]:
|
|
||||||
if event_type not in self._handlers:
|
|
||||||
self._handlers[event_type] = []
|
|
||||||
self._handlers[event_type].append(
|
|
||||||
cast(Callable[[Any, EventT], None], handler)
|
|
||||||
)
|
|
||||||
return handler
|
|
||||||
|
|
||||||
return decorator
|
|
||||||
|
|
||||||
def emit(self, source: Any, event: CrewEvent) -> None:
|
|
||||||
"""
|
|
||||||
Emit an event to all registered handlers
|
|
||||||
|
|
||||||
Args:
|
|
||||||
source: The object emitting the event
|
|
||||||
event: The event instance to emit
|
|
||||||
"""
|
|
||||||
event_type = type(event)
|
|
||||||
if event_type in self._handlers:
|
|
||||||
for handler in self._handlers[event_type]:
|
|
||||||
handler(source, event)
|
|
||||||
self._signal.send(source, event=event)
|
|
||||||
|
|
||||||
def clear_handlers(self) -> None:
|
|
||||||
"""Clear all registered event handlers - useful for testing"""
|
|
||||||
self._handlers.clear()
|
|
||||||
|
|
||||||
def register_handler(
|
|
||||||
self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
|
|
||||||
) -> None:
|
|
||||||
"""Register an event handler for a specific event type"""
|
|
||||||
if event_type not in self._handlers:
|
|
||||||
self._handlers[event_type] = []
|
|
||||||
self._handlers[event_type].append(
|
|
||||||
cast(Callable[[Any, EventTypes], None], handler)
|
|
||||||
)
|
|
||||||
|
|
||||||
@contextmanager
|
|
||||||
def scoped_handlers(self):
|
|
||||||
"""
|
|
||||||
Context manager for temporary event handling scope.
|
|
||||||
Useful for testing or temporary event handling.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
with crewai_event_bus.scoped_handlers():
|
|
||||||
@crewai_event_bus.on(CrewKickoffStarted)
|
|
||||||
def temp_handler(source, event):
|
|
||||||
print("Temporary handler")
|
|
||||||
# Do stuff...
|
|
||||||
# Handlers are cleared after the context
|
|
||||||
"""
|
|
||||||
previous_handlers = self._handlers.copy()
|
|
||||||
self._handlers.clear()
|
|
||||||
try:
|
|
||||||
yield
|
|
||||||
finally:
|
|
||||||
self._handlers = previous_handlers
|
|
||||||
|
|
||||||
|
|
||||||
# Global instance
|
|
||||||
crewai_event_bus = CrewAIEventsBus()
|
|
||||||
@@ -1,288 +0,0 @@
|
|||||||
from typing import Any, Dict
|
|
||||||
|
|
||||||
from pydantic import Field, PrivateAttr
|
|
||||||
|
|
||||||
from crewai.task import Task
|
|
||||||
from crewai.telemetry.telemetry import Telemetry
|
|
||||||
from crewai.utilities import Logger
|
|
||||||
from crewai.utilities.constants import EMITTER_COLOR
|
|
||||||
from crewai.utilities.events.base_event_listener import BaseEventListener
|
|
||||||
from crewai.utilities.events.llm_events import (
|
|
||||||
LLMCallCompletedEvent,
|
|
||||||
LLMCallFailedEvent,
|
|
||||||
LLMCallStartedEvent,
|
|
||||||
)
|
|
||||||
|
|
||||||
from .agent_events import AgentExecutionCompletedEvent, AgentExecutionStartedEvent
|
|
||||||
from .crew_events import (
|
|
||||||
CrewKickoffCompletedEvent,
|
|
||||||
CrewKickoffFailedEvent,
|
|
||||||
CrewKickoffStartedEvent,
|
|
||||||
CrewTestCompletedEvent,
|
|
||||||
CrewTestFailedEvent,
|
|
||||||
CrewTestStartedEvent,
|
|
||||||
CrewTrainCompletedEvent,
|
|
||||||
CrewTrainFailedEvent,
|
|
||||||
CrewTrainStartedEvent,
|
|
||||||
)
|
|
||||||
from .flow_events import (
|
|
||||||
FlowCreatedEvent,
|
|
||||||
FlowFinishedEvent,
|
|
||||||
FlowStartedEvent,
|
|
||||||
MethodExecutionFailedEvent,
|
|
||||||
MethodExecutionFinishedEvent,
|
|
||||||
MethodExecutionStartedEvent,
|
|
||||||
)
|
|
||||||
from .task_events import TaskCompletedEvent, TaskFailedEvent, TaskStartedEvent
|
|
||||||
from .tool_usage_events import (
|
|
||||||
ToolUsageErrorEvent,
|
|
||||||
ToolUsageFinishedEvent,
|
|
||||||
ToolUsageStartedEvent,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class EventListener(BaseEventListener):
|
|
||||||
_instance = None
|
|
||||||
_telemetry: Telemetry = PrivateAttr(default_factory=lambda: Telemetry())
|
|
||||||
logger = Logger(verbose=True, default_color=EMITTER_COLOR)
|
|
||||||
execution_spans: Dict[Task, Any] = Field(default_factory=dict)
|
|
||||||
|
|
||||||
def __new__(cls):
|
|
||||||
if cls._instance is None:
|
|
||||||
cls._instance = super().__new__(cls)
|
|
||||||
cls._instance._initialized = False
|
|
||||||
return cls._instance
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
if not hasattr(self, "_initialized") or not self._initialized:
|
|
||||||
super().__init__()
|
|
||||||
self._telemetry = Telemetry()
|
|
||||||
self._telemetry.set_tracer()
|
|
||||||
self.execution_spans = {}
|
|
||||||
self._initialized = True
|
|
||||||
|
|
||||||
# ----------- CREW EVENTS -----------
|
|
||||||
|
|
||||||
def setup_listeners(self, crewai_event_bus):
|
|
||||||
@crewai_event_bus.on(CrewKickoffStartedEvent)
|
|
||||||
def on_crew_started(source, event: CrewKickoffStartedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"🚀 Crew '{event.crew_name}' started, {source.id}",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
self._telemetry.crew_execution_span(source, event.inputs)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(CrewKickoffCompletedEvent)
|
|
||||||
def on_crew_completed(source, event: CrewKickoffCompletedEvent):
|
|
||||||
final_string_output = event.output.raw
|
|
||||||
self._telemetry.end_crew(source, final_string_output)
|
|
||||||
self.logger.log(
|
|
||||||
f"✅ Crew '{event.crew_name}' completed, {source.id}",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(CrewKickoffFailedEvent)
|
|
||||||
def on_crew_failed(source, event: CrewKickoffFailedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"❌ Crew '{event.crew_name}' failed, {source.id}",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(CrewTestStartedEvent)
|
|
||||||
def on_crew_test_started(source, event: CrewTestStartedEvent):
|
|
||||||
cloned_crew = source.copy()
|
|
||||||
self._telemetry.test_execution_span(
|
|
||||||
cloned_crew,
|
|
||||||
event.n_iterations,
|
|
||||||
event.inputs,
|
|
||||||
event.eval_llm or "",
|
|
||||||
)
|
|
||||||
self.logger.log(
|
|
||||||
f"🚀 Crew '{event.crew_name}' started test, {source.id}",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(CrewTestCompletedEvent)
|
|
||||||
def on_crew_test_completed(source, event: CrewTestCompletedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"✅ Crew '{event.crew_name}' completed test",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(CrewTestFailedEvent)
|
|
||||||
def on_crew_test_failed(source, event: CrewTestFailedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"❌ Crew '{event.crew_name}' failed test",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(CrewTrainStartedEvent)
|
|
||||||
def on_crew_train_started(source, event: CrewTrainStartedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"📋 Crew '{event.crew_name}' started train",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(CrewTrainCompletedEvent)
|
|
||||||
def on_crew_train_completed(source, event: CrewTrainCompletedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"✅ Crew '{event.crew_name}' completed train",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(CrewTrainFailedEvent)
|
|
||||||
def on_crew_train_failed(source, event: CrewTrainFailedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"❌ Crew '{event.crew_name}' failed train",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
# ----------- TASK EVENTS -----------
|
|
||||||
|
|
||||||
@crewai_event_bus.on(TaskStartedEvent)
|
|
||||||
def on_task_started(source, event: TaskStartedEvent):
|
|
||||||
span = self._telemetry.task_started(crew=source.agent.crew, task=source)
|
|
||||||
self.execution_spans[source] = span
|
|
||||||
|
|
||||||
self.logger.log(
|
|
||||||
f"📋 Task started: {source.description}",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(TaskCompletedEvent)
|
|
||||||
def on_task_completed(source, event: TaskCompletedEvent):
|
|
||||||
span = self.execution_spans.get(source)
|
|
||||||
if span:
|
|
||||||
self._telemetry.task_ended(span, source, source.agent.crew)
|
|
||||||
self.logger.log(
|
|
||||||
f"✅ Task completed: {source.description}",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
self.execution_spans[source] = None
|
|
||||||
|
|
||||||
@crewai_event_bus.on(TaskFailedEvent)
|
|
||||||
def on_task_failed(source, event: TaskFailedEvent):
|
|
||||||
span = self.execution_spans.get(source)
|
|
||||||
if span:
|
|
||||||
if source.agent and source.agent.crew:
|
|
||||||
self._telemetry.task_ended(span, source, source.agent.crew)
|
|
||||||
self.execution_spans[source] = None
|
|
||||||
self.logger.log(
|
|
||||||
f"❌ Task failed: {source.description}",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
# ----------- AGENT EVENTS -----------
|
|
||||||
|
|
||||||
@crewai_event_bus.on(AgentExecutionStartedEvent)
|
|
||||||
def on_agent_execution_started(source, event: AgentExecutionStartedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"🤖 Agent '{event.agent.role}' started task",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(AgentExecutionCompletedEvent)
|
|
||||||
def on_agent_execution_completed(source, event: AgentExecutionCompletedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"✅ Agent '{event.agent.role}' completed task",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
# ----------- FLOW EVENTS -----------
|
|
||||||
|
|
||||||
@crewai_event_bus.on(FlowCreatedEvent)
|
|
||||||
def on_flow_created(source, event: FlowCreatedEvent):
|
|
||||||
self._telemetry.flow_creation_span(event.flow_name)
|
|
||||||
self.logger.log(
|
|
||||||
f"🌊 Flow Created: '{event.flow_name}'",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(FlowStartedEvent)
|
|
||||||
def on_flow_started(source, event: FlowStartedEvent):
|
|
||||||
self._telemetry.flow_execution_span(
|
|
||||||
event.flow_name, list(source._methods.keys())
|
|
||||||
)
|
|
||||||
self.logger.log(
|
|
||||||
f"🤖 Flow Started: '{event.flow_name}', {source.flow_id}",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(FlowFinishedEvent)
|
|
||||||
def on_flow_finished(source, event: FlowFinishedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"👍 Flow Finished: '{event.flow_name}', {source.flow_id}",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(MethodExecutionStartedEvent)
|
|
||||||
def on_method_execution_started(source, event: MethodExecutionStartedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"🤖 Flow Method Started: '{event.method_name}'",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(MethodExecutionFailedEvent)
|
|
||||||
def on_method_execution_failed(source, event: MethodExecutionFailedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"❌ Flow Method Failed: '{event.method_name}'",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(MethodExecutionFinishedEvent)
|
|
||||||
def on_method_execution_finished(source, event: MethodExecutionFinishedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"👍 Flow Method Finished: '{event.method_name}'",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
# ----------- TOOL USAGE EVENTS -----------
|
|
||||||
|
|
||||||
@crewai_event_bus.on(ToolUsageStartedEvent)
|
|
||||||
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"🤖 Tool Usage Started: '{event.tool_name}'",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(ToolUsageFinishedEvent)
|
|
||||||
def on_tool_usage_finished(source, event: ToolUsageFinishedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"✅ Tool Usage Finished: '{event.tool_name}'",
|
|
||||||
event.timestamp,
|
|
||||||
#
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(ToolUsageErrorEvent)
|
|
||||||
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"❌ Tool Usage Error: '{event.tool_name}'",
|
|
||||||
event.timestamp,
|
|
||||||
#
|
|
||||||
)
|
|
||||||
|
|
||||||
# ----------- LLM EVENTS -----------
|
|
||||||
|
|
||||||
@crewai_event_bus.on(LLMCallStartedEvent)
|
|
||||||
def on_llm_call_started(source, event: LLMCallStartedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"🤖 LLM Call Started",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(LLMCallCompletedEvent)
|
|
||||||
def on_llm_call_completed(source, event: LLMCallCompletedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"✅ LLM Call Completed",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(LLMCallFailedEvent)
|
|
||||||
def on_llm_call_failed(source, event: LLMCallFailedEvent):
|
|
||||||
self.logger.log(
|
|
||||||
f"❌ LLM Call Failed: '{event.error}'",
|
|
||||||
event.timestamp,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
event_listener = EventListener()
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
from typing import Union
|
|
||||||
|
|
||||||
from .agent_events import (
|
|
||||||
AgentExecutionCompletedEvent,
|
|
||||||
AgentExecutionErrorEvent,
|
|
||||||
AgentExecutionStartedEvent,
|
|
||||||
)
|
|
||||||
from .crew_events import (
|
|
||||||
CrewKickoffCompletedEvent,
|
|
||||||
CrewKickoffFailedEvent,
|
|
||||||
CrewKickoffStartedEvent,
|
|
||||||
CrewTestCompletedEvent,
|
|
||||||
CrewTestFailedEvent,
|
|
||||||
CrewTestStartedEvent,
|
|
||||||
CrewTrainCompletedEvent,
|
|
||||||
CrewTrainFailedEvent,
|
|
||||||
CrewTrainStartedEvent,
|
|
||||||
)
|
|
||||||
from .flow_events import (
|
|
||||||
FlowFinishedEvent,
|
|
||||||
FlowStartedEvent,
|
|
||||||
MethodExecutionFailedEvent,
|
|
||||||
MethodExecutionFinishedEvent,
|
|
||||||
MethodExecutionStartedEvent,
|
|
||||||
)
|
|
||||||
from .task_events import (
|
|
||||||
TaskCompletedEvent,
|
|
||||||
TaskFailedEvent,
|
|
||||||
TaskStartedEvent,
|
|
||||||
)
|
|
||||||
from .tool_usage_events import (
|
|
||||||
ToolUsageErrorEvent,
|
|
||||||
ToolUsageFinishedEvent,
|
|
||||||
ToolUsageStartedEvent,
|
|
||||||
)
|
|
||||||
|
|
||||||
EventTypes = Union[
|
|
||||||
CrewKickoffStartedEvent,
|
|
||||||
CrewKickoffCompletedEvent,
|
|
||||||
CrewKickoffFailedEvent,
|
|
||||||
CrewTestStartedEvent,
|
|
||||||
CrewTestCompletedEvent,
|
|
||||||
CrewTestFailedEvent,
|
|
||||||
CrewTrainStartedEvent,
|
|
||||||
CrewTrainCompletedEvent,
|
|
||||||
CrewTrainFailedEvent,
|
|
||||||
AgentExecutionStartedEvent,
|
|
||||||
AgentExecutionCompletedEvent,
|
|
||||||
TaskStartedEvent,
|
|
||||||
TaskCompletedEvent,
|
|
||||||
TaskFailedEvent,
|
|
||||||
FlowStartedEvent,
|
|
||||||
FlowFinishedEvent,
|
|
||||||
MethodExecutionStartedEvent,
|
|
||||||
MethodExecutionFinishedEvent,
|
|
||||||
MethodExecutionFailedEvent,
|
|
||||||
AgentExecutionErrorEvent,
|
|
||||||
ToolUsageFinishedEvent,
|
|
||||||
ToolUsageErrorEvent,
|
|
||||||
ToolUsageStartedEvent,
|
|
||||||
]
|
|
||||||
@@ -1,71 +0,0 @@
|
|||||||
from typing import Any, Dict, Optional, Union
|
|
||||||
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
from .base_events import CrewEvent
|
|
||||||
|
|
||||||
|
|
||||||
class FlowEvent(CrewEvent):
|
|
||||||
"""Base class for all flow events"""
|
|
||||||
|
|
||||||
type: str
|
|
||||||
flow_name: str
|
|
||||||
|
|
||||||
|
|
||||||
class FlowStartedEvent(FlowEvent):
|
|
||||||
"""Event emitted when a flow starts execution"""
|
|
||||||
|
|
||||||
flow_name: str
|
|
||||||
inputs: Optional[Dict[str, Any]] = None
|
|
||||||
type: str = "flow_started"
|
|
||||||
|
|
||||||
|
|
||||||
class FlowCreatedEvent(FlowEvent):
|
|
||||||
"""Event emitted when a flow is created"""
|
|
||||||
|
|
||||||
flow_name: str
|
|
||||||
type: str = "flow_created"
|
|
||||||
|
|
||||||
|
|
||||||
class MethodExecutionStartedEvent(FlowEvent):
|
|
||||||
"""Event emitted when a flow method starts execution"""
|
|
||||||
|
|
||||||
flow_name: str
|
|
||||||
method_name: str
|
|
||||||
state: Union[Dict[str, Any], BaseModel]
|
|
||||||
params: Optional[Dict[str, Any]] = None
|
|
||||||
type: str = "method_execution_started"
|
|
||||||
|
|
||||||
|
|
||||||
class MethodExecutionFinishedEvent(FlowEvent):
|
|
||||||
"""Event emitted when a flow method completes execution"""
|
|
||||||
|
|
||||||
flow_name: str
|
|
||||||
method_name: str
|
|
||||||
result: Any = None
|
|
||||||
state: Union[Dict[str, Any], BaseModel]
|
|
||||||
type: str = "method_execution_finished"
|
|
||||||
|
|
||||||
|
|
||||||
class MethodExecutionFailedEvent(FlowEvent):
|
|
||||||
"""Event emitted when a flow method fails execution"""
|
|
||||||
|
|
||||||
flow_name: str
|
|
||||||
method_name: str
|
|
||||||
error: Any
|
|
||||||
type: str = "method_execution_failed"
|
|
||||||
|
|
||||||
|
|
||||||
class FlowFinishedEvent(FlowEvent):
|
|
||||||
"""Event emitted when a flow completes execution"""
|
|
||||||
|
|
||||||
flow_name: str
|
|
||||||
result: Optional[Any] = None
|
|
||||||
type: str = "flow_finished"
|
|
||||||
|
|
||||||
|
|
||||||
class FlowPlotEvent(FlowEvent):
|
|
||||||
"""Event emitted when a flow plot is created"""
|
|
||||||
|
|
||||||
flow_name: str
|
|
||||||
type: str = "flow_plot"
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
from enum import Enum
|
|
||||||
from typing import Any, Dict, List, Optional, Union
|
|
||||||
|
|
||||||
from crewai.utilities.events.base_events import CrewEvent
|
|
||||||
|
|
||||||
|
|
||||||
class LLMCallType(Enum):
|
|
||||||
"""Type of LLM call being made"""
|
|
||||||
|
|
||||||
TOOL_CALL = "tool_call"
|
|
||||||
LLM_CALL = "llm_call"
|
|
||||||
|
|
||||||
|
|
||||||
class LLMCallStartedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a LLM call starts"""
|
|
||||||
|
|
||||||
type: str = "llm_call_started"
|
|
||||||
messages: Union[str, List[Dict[str, str]]]
|
|
||||||
tools: Optional[List[dict]] = None
|
|
||||||
callbacks: Optional[List[Any]] = None
|
|
||||||
available_functions: Optional[Dict[str, Any]] = None
|
|
||||||
|
|
||||||
|
|
||||||
class LLMCallCompletedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a LLM call completes"""
|
|
||||||
|
|
||||||
type: str = "llm_call_completed"
|
|
||||||
response: Any
|
|
||||||
call_type: LLMCallType
|
|
||||||
|
|
||||||
|
|
||||||
class LLMCallFailedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a LLM call fails"""
|
|
||||||
|
|
||||||
error: str
|
|
||||||
type: str = "llm_call_failed"
|
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
from typing import Optional
|
|
||||||
|
|
||||||
from crewai.tasks.task_output import TaskOutput
|
|
||||||
from crewai.utilities.events.base_events import CrewEvent
|
|
||||||
|
|
||||||
|
|
||||||
class TaskStartedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a task starts"""
|
|
||||||
|
|
||||||
type: str = "task_started"
|
|
||||||
context: Optional[str]
|
|
||||||
|
|
||||||
|
|
||||||
class TaskCompletedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a task completes"""
|
|
||||||
|
|
||||||
output: TaskOutput
|
|
||||||
type: str = "task_completed"
|
|
||||||
|
|
||||||
|
|
||||||
class TaskFailedEvent(CrewEvent):
|
|
||||||
"""Event emitted when a task fails"""
|
|
||||||
|
|
||||||
error: str
|
|
||||||
type: str = "task_failed"
|
|
||||||
|
|
||||||
|
|
||||||
class TaskEvaluationEvent(CrewEvent):
|
|
||||||
"""Event emitted when a task evaluation is completed"""
|
|
||||||
|
|
||||||
type: str = "task_evaluation"
|
|
||||||
evaluation_type: str
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
from .agentops_listener import agentops_listener
|
|
||||||
@@ -1,67 +0,0 @@
|
|||||||
from typing import Optional
|
|
||||||
|
|
||||||
from crewai.utilities.events import (
|
|
||||||
CrewKickoffCompletedEvent,
|
|
||||||
ToolUsageErrorEvent,
|
|
||||||
ToolUsageStartedEvent,
|
|
||||||
)
|
|
||||||
from crewai.utilities.events.base_event_listener import BaseEventListener
|
|
||||||
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
|
|
||||||
from crewai.utilities.events.task_events import TaskEvaluationEvent
|
|
||||||
|
|
||||||
try:
|
|
||||||
import agentops
|
|
||||||
|
|
||||||
AGENTOPS_INSTALLED = True
|
|
||||||
except ImportError:
|
|
||||||
AGENTOPS_INSTALLED = False
|
|
||||||
|
|
||||||
|
|
||||||
class AgentOpsListener(BaseEventListener):
|
|
||||||
tool_event: Optional["agentops.ToolEvent"] = None
|
|
||||||
session: Optional["agentops.Session"] = None
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
super().__init__()
|
|
||||||
|
|
||||||
def setup_listeners(self, crewai_event_bus):
|
|
||||||
if not AGENTOPS_INSTALLED:
|
|
||||||
return
|
|
||||||
|
|
||||||
@crewai_event_bus.on(CrewKickoffStartedEvent)
|
|
||||||
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
|
|
||||||
self.session = agentops.init()
|
|
||||||
for agent in source.agents:
|
|
||||||
if self.session:
|
|
||||||
self.session.create_agent(
|
|
||||||
name=agent.role,
|
|
||||||
agent_id=str(agent.id),
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(CrewKickoffCompletedEvent)
|
|
||||||
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
|
|
||||||
if self.session:
|
|
||||||
self.session.end_session(
|
|
||||||
end_state="Success",
|
|
||||||
end_state_reason="Finished Execution",
|
|
||||||
)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(ToolUsageStartedEvent)
|
|
||||||
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
|
|
||||||
self.tool_event = agentops.ToolEvent(name=event.tool_name)
|
|
||||||
if self.session:
|
|
||||||
self.session.record(self.tool_event)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(ToolUsageErrorEvent)
|
|
||||||
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
|
|
||||||
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
|
|
||||||
|
|
||||||
@crewai_event_bus.on(TaskEvaluationEvent)
|
|
||||||
def on_task_evaluation(source, event: TaskEvaluationEvent):
|
|
||||||
if self.session:
|
|
||||||
self.session.create_agent(
|
|
||||||
name="Task Evaluator", agent_id=str(source.original_agent.id)
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
agentops_listener = AgentOpsListener()
|
|
||||||
@@ -1,64 +0,0 @@
|
|||||||
from datetime import datetime
|
|
||||||
from typing import Any, Callable, Dict
|
|
||||||
|
|
||||||
from .base_events import CrewEvent
|
|
||||||
|
|
||||||
|
|
||||||
class ToolUsageEvent(CrewEvent):
|
|
||||||
"""Base event for tool usage tracking"""
|
|
||||||
|
|
||||||
agent_key: str
|
|
||||||
agent_role: str
|
|
||||||
tool_name: str
|
|
||||||
tool_args: Dict[str, Any] | str
|
|
||||||
tool_class: str
|
|
||||||
run_attempts: int | None = None
|
|
||||||
delegations: int | None = None
|
|
||||||
|
|
||||||
model_config = {"arbitrary_types_allowed": True}
|
|
||||||
|
|
||||||
|
|
||||||
class ToolUsageStartedEvent(ToolUsageEvent):
|
|
||||||
"""Event emitted when a tool execution is started"""
|
|
||||||
|
|
||||||
type: str = "tool_usage_started"
|
|
||||||
|
|
||||||
|
|
||||||
class ToolUsageFinishedEvent(ToolUsageEvent):
|
|
||||||
"""Event emitted when a tool execution is completed"""
|
|
||||||
|
|
||||||
started_at: datetime
|
|
||||||
finished_at: datetime
|
|
||||||
from_cache: bool = False
|
|
||||||
type: str = "tool_usage_finished"
|
|
||||||
|
|
||||||
|
|
||||||
class ToolUsageErrorEvent(ToolUsageEvent):
|
|
||||||
"""Event emitted when a tool execution encounters an error"""
|
|
||||||
|
|
||||||
error: Any
|
|
||||||
type: str = "tool_usage_error"
|
|
||||||
|
|
||||||
|
|
||||||
class ToolValidateInputErrorEvent(ToolUsageEvent):
|
|
||||||
"""Event emitted when a tool input validation encounters an error"""
|
|
||||||
|
|
||||||
error: Any
|
|
||||||
type: str = "tool_validate_input_error"
|
|
||||||
|
|
||||||
|
|
||||||
class ToolSelectionErrorEvent(ToolUsageEvent):
|
|
||||||
"""Event emitted when a tool selection encounters an error"""
|
|
||||||
|
|
||||||
error: Any
|
|
||||||
type: str = "tool_selection_error"
|
|
||||||
|
|
||||||
|
|
||||||
class ToolExecutionErrorEvent(CrewEvent):
|
|
||||||
"""Event emitted when a tool execution encounters an error"""
|
|
||||||
|
|
||||||
error: Any
|
|
||||||
type: str = "tool_execution_error"
|
|
||||||
tool_name: str
|
|
||||||
tool_args: Dict[str, Any]
|
|
||||||
tool_class: Callable
|
|
||||||
@@ -1,64 +1,30 @@
|
|||||||
import json
|
|
||||||
import os
|
import os
|
||||||
import pickle
|
import pickle
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from typing import Union
|
|
||||||
|
|
||||||
|
|
||||||
class FileHandler:
|
class FileHandler:
|
||||||
"""Handler for file operations supporting both JSON and text-based logging.
|
"""take care of file operations, currently it only logs messages to a file"""
|
||||||
|
|
||||||
Args:
|
|
||||||
file_path (Union[bool, str]): Path to the log file or boolean flag
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, file_path: Union[bool, str]):
|
def __init__(self, file_path):
|
||||||
self._initialize_path(file_path)
|
if isinstance(file_path, bool):
|
||||||
|
|
||||||
def _initialize_path(self, file_path: Union[bool, str]):
|
|
||||||
if file_path is True: # File path is boolean True
|
|
||||||
self._path = os.path.join(os.curdir, "logs.txt")
|
self._path = os.path.join(os.curdir, "logs.txt")
|
||||||
|
elif isinstance(file_path, str):
|
||||||
elif isinstance(file_path, str): # File path is a string
|
self._path = file_path
|
||||||
if file_path.endswith((".json", ".txt")):
|
|
||||||
self._path = file_path # No modification if the file ends with .json or .txt
|
|
||||||
else:
|
|
||||||
self._path = file_path + ".txt" # Append .txt if the file doesn't end with .json or .txt
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
raise ValueError("file_path must be a string or boolean.") # Handle the case where file_path isn't valid
|
raise ValueError("file_path must be either a boolean or a string.")
|
||||||
|
|
||||||
def log(self, **kwargs):
|
def log(self, **kwargs):
|
||||||
try:
|
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||||
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
message = (
|
||||||
log_entry = {"timestamp": now, **kwargs}
|
f"{now}: "
|
||||||
|
+ ", ".join([f'{key}="{value}"' for key, value in kwargs.items()])
|
||||||
|
+ "\n"
|
||||||
|
)
|
||||||
|
with open(self._path, "a", encoding="utf-8") as file:
|
||||||
|
file.write(message + "\n")
|
||||||
|
|
||||||
if self._path.endswith(".json"):
|
|
||||||
# Append log in JSON format
|
|
||||||
with open(self._path, "a", encoding="utf-8") as file:
|
|
||||||
# If the file is empty, start with a list; else, append to it
|
|
||||||
try:
|
|
||||||
# Try reading existing content to avoid overwriting
|
|
||||||
with open(self._path, "r", encoding="utf-8") as read_file:
|
|
||||||
existing_data = json.load(read_file)
|
|
||||||
existing_data.append(log_entry)
|
|
||||||
except (json.JSONDecodeError, FileNotFoundError):
|
|
||||||
# If no valid JSON or file doesn't exist, start with an empty list
|
|
||||||
existing_data = [log_entry]
|
|
||||||
|
|
||||||
with open(self._path, "w", encoding="utf-8") as write_file:
|
|
||||||
json.dump(existing_data, write_file, indent=4)
|
|
||||||
write_file.write("\n")
|
|
||||||
|
|
||||||
else:
|
|
||||||
# Append log in plain text format
|
|
||||||
message = f"{now}: " + ", ".join([f"{key}=\"{value}\"" for key, value in kwargs.items()]) + "\n"
|
|
||||||
with open(self._path, "a", encoding="utf-8") as file:
|
|
||||||
file.write(message)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
raise ValueError(f"Failed to log message: {str(e)}")
|
|
||||||
|
|
||||||
class PickleHandler:
|
class PickleHandler:
|
||||||
def __init__(self, file_name: str) -> None:
|
def __init__(self, file_name: str) -> None:
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -11,10 +11,12 @@ class InternalInstructor:
|
|||||||
model: Type,
|
model: Type,
|
||||||
agent: Optional[Any] = None,
|
agent: Optional[Any] = None,
|
||||||
llm: Optional[str] = None,
|
llm: Optional[str] = None,
|
||||||
|
instructions: Optional[str] = None,
|
||||||
):
|
):
|
||||||
self.content = content
|
self.content = content
|
||||||
self.agent = agent
|
self.agent = agent
|
||||||
self.llm = llm
|
self.llm = llm
|
||||||
|
self.instructions = instructions
|
||||||
self.model = model
|
self.model = model
|
||||||
self._client = None
|
self._client = None
|
||||||
self.set_instructor()
|
self.set_instructor()
|
||||||
@@ -29,7 +31,10 @@ class InternalInstructor:
|
|||||||
import instructor
|
import instructor
|
||||||
from litellm import completion
|
from litellm import completion
|
||||||
|
|
||||||
self._client = instructor.from_litellm(completion)
|
self._client = instructor.from_litellm(
|
||||||
|
completion,
|
||||||
|
mode=instructor.Mode.TOOLS,
|
||||||
|
)
|
||||||
|
|
||||||
def to_json(self):
|
def to_json(self):
|
||||||
model = self.to_pydantic()
|
model = self.to_pydantic()
|
||||||
@@ -37,6 +42,8 @@ class InternalInstructor:
|
|||||||
|
|
||||||
def to_pydantic(self):
|
def to_pydantic(self):
|
||||||
messages = [{"role": "user", "content": self.content}]
|
messages = [{"role": "user", "content": self.content}]
|
||||||
|
if self.instructions:
|
||||||
|
messages.append({"role": "system", "content": self.instructions})
|
||||||
model = self._client.chat.completions.create(
|
model = self._client.chat.completions.create(
|
||||||
model=self.llm.model, response_model=self.model, messages=messages
|
model=self.llm.model, response_model=self.model, messages=messages
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -44,7 +44,6 @@ def create_llm(
|
|||||||
# Extract attributes with explicit types
|
# Extract attributes with explicit types
|
||||||
model = (
|
model = (
|
||||||
getattr(llm_value, "model_name", None)
|
getattr(llm_value, "model_name", None)
|
||||||
or getattr(llm_value, "model", None)
|
|
||||||
or getattr(llm_value, "deployment_name", None)
|
or getattr(llm_value, "deployment_name", None)
|
||||||
or str(llm_value)
|
or str(llm_value)
|
||||||
)
|
)
|
||||||
@@ -54,7 +53,6 @@ def create_llm(
|
|||||||
timeout: Optional[float] = getattr(llm_value, "timeout", None)
|
timeout: Optional[float] = getattr(llm_value, "timeout", None)
|
||||||
api_key: Optional[str] = getattr(llm_value, "api_key", None)
|
api_key: Optional[str] = getattr(llm_value, "api_key", None)
|
||||||
base_url: Optional[str] = getattr(llm_value, "base_url", None)
|
base_url: Optional[str] = getattr(llm_value, "base_url", None)
|
||||||
api_base: Optional[str] = getattr(llm_value, "api_base", None)
|
|
||||||
|
|
||||||
created_llm = LLM(
|
created_llm = LLM(
|
||||||
model=model,
|
model=model,
|
||||||
@@ -64,7 +62,6 @@ def create_llm(
|
|||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
api_key=api_key,
|
api_key=api_key,
|
||||||
base_url=base_url,
|
base_url=base_url,
|
||||||
api_base=api_base,
|
|
||||||
)
|
)
|
||||||
return created_llm
|
return created_llm
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -104,18 +101,8 @@ def _llm_via_environment_or_fallback() -> Optional[LLM]:
|
|||||||
callbacks: List[Any] = []
|
callbacks: List[Any] = []
|
||||||
|
|
||||||
# Optional base URL from env
|
# Optional base URL from env
|
||||||
base_url = (
|
api_base = os.environ.get("OPENAI_API_BASE") or os.environ.get("OPENAI_BASE_URL")
|
||||||
os.environ.get("BASE_URL")
|
if api_base:
|
||||||
or os.environ.get("OPENAI_API_BASE")
|
|
||||||
or os.environ.get("OPENAI_BASE_URL")
|
|
||||||
)
|
|
||||||
|
|
||||||
api_base = os.environ.get("API_BASE") or os.environ.get("AZURE_API_BASE")
|
|
||||||
|
|
||||||
# Synchronize base_url and api_base if one is populated and the other is not
|
|
||||||
if base_url and not api_base:
|
|
||||||
api_base = base_url
|
|
||||||
elif api_base and not base_url:
|
|
||||||
base_url = api_base
|
base_url = api_base
|
||||||
|
|
||||||
# Initialize llm_params dictionary
|
# Initialize llm_params dictionary
|
||||||
@@ -128,7 +115,6 @@ def _llm_via_environment_or_fallback() -> Optional[LLM]:
|
|||||||
"timeout": timeout,
|
"timeout": timeout,
|
||||||
"api_key": api_key,
|
"api_key": api_key,
|
||||||
"base_url": base_url,
|
"base_url": base_url,
|
||||||
"api_base": api_base,
|
|
||||||
"api_version": api_version,
|
"api_version": api_version,
|
||||||
"presence_penalty": presence_penalty,
|
"presence_penalty": presence_penalty,
|
||||||
"frequency_penalty": frequency_penalty,
|
"frequency_penalty": frequency_penalty,
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user