Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-17 12:58:31 +00:00.

Compare commits: bugfix/tes... → devin/1738... (125 commits)
Commits in this comparison (125, short SHAs):

20fc2f9878 d6d98ee969 c149b75874 86844ff3df b442fe20a2 9b1b1d33ba 3c350e8933 a3a5507f9a
a175167aaf 1dc62b0d0a e0600e3bb9 75b376ebac 29106068b7 3bf531189f 47919a60a0 6b9ed90510
f6a65486f1 bf6db93bdf a79d77dfd7 25e68bc459 56ec9bc224 6f6010db1c a95227deef 636dac6efb
a4e2b17bae 8eef02739a 6f4ad532e6 74a1de8550 e529766391 a7f5d574dc 0cc02d9492 fa26f6ebae
f6c2982619 5a8649a97f e6100debac abee94d056 92731544ae 77c7b7dfa1 823f22a601 649414805d
8017ab2dfd 6445cda35a 6116c73721 a038b751ef 5006161d31 85a13751ba 1c7c4cb828 509fb375ca
d01d44b29c ea64c29fee f4bb040ad8 515478473a 9cf3fadd0f 89c4b3fe88 9e5c599f58 a950e67c7d
de6933b2d2 748383d74c 23b9e10323 ddb7958da7 477cce321f 7bed63a693 2709a9205a d19d7b01ec
a3ad2c1957 c3e7a3ec19 cba8c9faec bcb7fb27d0 c310044bec 5263df24b6 dea6ed7ef0 d3a0dad323
67bf4aea56 8c76bad50f e27a15023c a836f466f4 67f0de1f90 c642ebf97e a21e310d78 aba68da542
e254f11933 ab2274caf0 3e4f112f39 cc018bf128 46d3e4d4d9 627bb3f5f6 4a44245de9 30d027158a
3fecde49b6 cc129a0bce b5779dca12 42311d9c7a 294f2cc3a9 3dc442801f c12343a8b8 835557e648
4185ea688f 0532089246 24b155015c 8ceeec7d36 75e68f6fc8 3de81cedd6 5dc8dd0e8a b8d07fee83
be8e33daf6 efc8323c63 831951efc4 2131b94ddb b3504e768c 350457b9b8 355bf3b48b 0e94236735
673a38c5d9 8f57753656 a2f839fada 440883e9e8 d3da73136c 7272fd15ac 518800239c 30bd79390a
d1e2430aac bfe2c44f55 845951a0db c1172a685a 4bcc3b532d
.gitignore (vendored, 1 change)

@@ -21,3 +21,4 @@ crew_tasks_output.json
 .mypy_cache
 .ruff_cache
 .venv
+agentops.log
README.md (18 changes)

@@ -1,10 +1,18 @@
 <div align="center">
 
 
 
 # **CrewAI**
 
-🤖 **CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
+**CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
 
+**CrewAI Enterprise**
+Want to plan, build (+ no code), deploy, monitor and iterate on your agents? [CrewAI Enterprise](https://www.crewai.com/enterprise). Designed for complex, real-world applications, our enterprise solution offers:
+
+- **Seamless Integrations**
+- **Scalable & Secure Deployment**
+- **Actionable Insights**
+- **24/7 Support**
+
 <h3>
 
@@ -190,7 +198,7 @@ research_task:
   description: >
     Conduct a thorough research about {topic}
     Make sure you find any interesting and relevant information given
-    the current year is 2024.
+    the current year is 2025.
   expected_output: >
     A list with 10 bullet points of the most relevant information about {topic}
   agent: researcher
@@ -392,7 +400,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
             goal="Gather and validate supporting market data",
             backstory="You excel at finding and correlating multiple data sources"
         )
 
         analysis_task = Task(
             description="Analyze {sector} sector data for the past {timeframe}",
             expected_output="Detailed market analysis with confidence score",
@@ -403,7 +411,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
             expected_output="Corroborating evidence and potential contradictions",
             agent=researcher
         )
 
         # Demonstrate crew autonomy
         analysis_crew = Crew(
             agents=[analyst, researcher],
Agents documentation:

@@ -43,7 +43,7 @@ Think of an agent as a specialized team member with specific skills, expertise,
 | **Max Retry Limit** _(optional)_ | `max_retry_limit` | `int` | Maximum number of retries when an error occurs. Default is 2. |
 | **Respect Context Window** _(optional)_ | `respect_context_window` | `bool` | Keep messages under context window size by summarizing. Default is True. |
 | **Code Execution Mode** _(optional)_ | `code_execution_mode` | `Literal["safe", "unsafe"]` | Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct). Default is 'safe'. |
-| **Embedder Config** _(optional)_ | `embedder_config` | `Optional[Dict[str, Any]]` | Configuration for the embedder used by the agent. |
+| **Embedder** _(optional)_ | `embedder` | `Optional[Dict[str, Any]]` | Configuration for the embedder used by the agent. |
 | **Knowledge Sources** _(optional)_ | `knowledge_sources` | `Optional[List[BaseKnowledgeSource]]` | Knowledge sources available to the agent. |
 | **Use System Prompt** _(optional)_ | `use_system_prompt` | `Optional[bool]` | Whether to use system prompt (for o1 model support). Default is True. |
 
@@ -101,6 +101,8 @@ from crewai_tools import SerperDevTool
 class LatestAiDevelopmentCrew():
     """LatestAiDevelopment crew"""
 
+    agents_config = "config/agents.yaml"
+
     @agent
     def researcher(self) -> Agent:
         return Agent(
@@ -150,7 +152,7 @@ agent = Agent(
     use_system_prompt=True,  # Default: True
     tools=[SerperDevTool()],  # Optional: List of tools
     knowledge_sources=None,  # Optional: List of knowledge sources
-    embedder_config=None,  # Optional: Custom embedder configuration
+    embedder=None,  # Optional: Custom embedder configuration
     system_template=None,  # Optional: Custom system prompt template
     prompt_template=None,  # Optional: Custom prompt template
     response_template=None,  # Optional: Custom response template
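An illustration of the renamed parameter in the two hunks above. The role/goal/backstory strings are placeholders, and the embedder dict shape is borrowed from the crew-level examples later in this comparison, so treat the exact config keys as assumptions:

```python
from crewai import Agent

agent = Agent(
    role="Researcher",                       # placeholder values for illustration
    goal="Find recent AI developments",
    backstory="Placeholder backstory",
    embedder={                               # was embedder_config= before this change
        "provider": "openai",
        "config": {"model": "text-embedding-3-small"},
    },
)
```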
CLI documentation:

@@ -12,7 +12,7 @@ The CrewAI CLI provides a set of commands to interact with CrewAI, allowing you
 
 To use the CrewAI CLI, make sure you have CrewAI installed:
 
-```shell
+```shell Terminal
 pip install crewai
 ```
 
@@ -20,7 +20,7 @@ pip install crewai
 
 The basic structure of a CrewAI CLI command is:
 
-```shell
+```shell Terminal
 crewai [COMMAND] [OPTIONS] [ARGUMENTS]
 ```
 
@@ -30,7 +30,7 @@ crewai [COMMAND] [OPTIONS] [ARGUMENTS]
 
 Create a new crew or flow.
 
-```shell
+```shell Terminal
 crewai create [OPTIONS] TYPE NAME
 ```
 
@@ -38,7 +38,7 @@ crewai create [OPTIONS] TYPE NAME
 - `NAME`: Name of the crew or flow
 
 Example:
-```shell
+```shell Terminal
 crewai create crew my_new_crew
 crewai create flow my_new_flow
 ```
@@ -47,14 +47,14 @@ crewai create flow my_new_flow
 
 Show the installed version of CrewAI.
 
-```shell
+```shell Terminal
 crewai version [OPTIONS]
 ```
 
 - `--tools`: (Optional) Show the installed version of CrewAI tools
 
 Example:
-```shell
+```shell Terminal
 crewai version
 crewai version --tools
 ```
@@ -63,7 +63,7 @@ crewai version --tools
 
 Train the crew for a specified number of iterations.
 
-```shell
+```shell Terminal
 crewai train [OPTIONS]
 ```
 
@@ -71,7 +71,7 @@ crewai train [OPTIONS]
 - `-f, --filename TEXT`: Path to a custom file for training (default: "trained_agents_data.pkl")
 
 Example:
-```shell
+```shell Terminal
 crewai train -n 10 -f my_training_data.pkl
 ```
 
@@ -79,14 +79,14 @@ crewai train -n 10 -f my_training_data.pkl
 
 Replay the crew execution from a specific task.
 
-```shell
+```shell Terminal
 crewai replay [OPTIONS]
 ```
 
 - `-t, --task_id TEXT`: Replay the crew from this task ID, including all subsequent tasks
 
 Example:
-```shell
+```shell Terminal
 crewai replay -t task_123456
 ```
 
@@ -94,7 +94,7 @@ crewai replay -t task_123456
 
 Retrieve your latest crew.kickoff() task outputs.
 
-```shell
+```shell Terminal
 crewai log-tasks-outputs
 ```
 
@@ -102,7 +102,7 @@ crewai log-tasks-outputs
 
 Reset the crew memories (long, short, entity, latest_crew_kickoff_outputs).
 
-```shell
+```shell Terminal
 crewai reset-memories [OPTIONS]
 ```
 
@@ -113,7 +113,7 @@ crewai reset-memories [OPTIONS]
 - `-a, --all`: Reset ALL memories
 
 Example:
-```shell
+```shell Terminal
 crewai reset-memories --long --short
 crewai reset-memories --all
 ```
@@ -122,7 +122,7 @@ crewai reset-memories --all
 
 Test the crew and evaluate the results.
 
-```shell
+```shell Terminal
 crewai test [OPTIONS]
 ```
 
@@ -130,7 +130,7 @@ crewai test [OPTIONS]
 - `-m, --model TEXT`: LLM Model to run the tests on the Crew (default: "gpt-4o-mini")
 
 Example:
-```shell
+```shell Terminal
 crewai test -n 5 -m gpt-3.5-turbo
 ```
 
@@ -138,7 +138,7 @@ crewai test -n 5 -m gpt-3.5-turbo
 
 Run the crew.
 
-```shell
+```shell Terminal
 crewai run
 ```
 <Note>
@@ -147,7 +147,36 @@ Some commands may require additional configuration or setup within your project
 </Note>
 
 
-### 9. API Keys
+### 9. Chat
+
+Starting in version `0.98.0`, when you run the `crewai chat` command, you start an interactive session with your crew. The AI assistant will guide you by asking for necessary inputs to execute the crew. Once all inputs are provided, the crew will execute its tasks.
+
+After receiving the results, you can continue interacting with the assistant for further instructions or questions.
+
+```shell Terminal
+crewai chat
+```
+<Note>
+Ensure you execute these commands from your CrewAI project's root directory.
+</Note>
+
+<Note>
+IMPORTANT: Set the `chat_llm` property in your `crew.py` file to enable this command.
+
+```python
+@crew
+def crew(self) -> Crew:
+    return Crew(
+        agents=self.agents,
+        tasks=self.tasks,
+        process=Process.sequential,
+        verbose=True,
+        chat_llm="gpt-4o",  # LLM used for chat orchestration
+    )
+```
+</Note>
+
+
+### 10. API Keys
 
 When running the ```crewai create crew``` command, the CLI will first show you the top 5 most common LLM providers and ask you to select one.
 
@@ -161,6 +190,7 @@ The CLI will initially prompt for API keys for the following services:
 * Groq
 * Anthropic
 * Google Gemini
+* SambaNova
 
 When you select a provider, the CLI will prompt you to enter your API key.
 
Crews documentation:

@@ -23,14 +23,14 @@ A crew in crewAI represents a collaborative group of agents working together to
 | **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
 | **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
 | **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
 | **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
 | **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
 | **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
 | **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
 | **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
 | **Task Callback** _(optional)_ | `task_callback` | A function that is called after the completion of each task. Useful for monitoring or additional operations post-task execution. |
 | **Share Crew** _(optional)_ | `share_crew` | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
-| **Output Log File** _(optional)_ | `output_log_file` | Whether you want to have a file with the complete crew output and execution. You can set it using True and it will default to the folder you are currently in and it will be called logs.txt or passing a string with the full path and name of the file. |
+| **Output Log File** _(optional)_ | `output_log_file` | Set to `True` to save logs as logs.txt in the current directory, or provide a file path. Logs will be in JSON format if the filename ends in .json, otherwise .txt. Defaults to `None`. |
 | **Manager Agent** _(optional)_ | `manager_agent` | `manager` sets a custom agent that will be used as a manager. |
 | **Prompt File** _(optional)_ | `prompt_file` | Path to the prompt JSON file to be used for the crew. |
 | **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated before each Crew iteration, all Crew data is sent to an AgentPlanner that will plan the tasks and this plan will be added to each task description. |
@@ -240,6 +240,23 @@ print(f"Tasks Output: {crew_output.tasks_output}")
 print(f"Token Usage: {crew_output.token_usage}")
 ```
 
+## Accessing Crew Logs
+
+You can see a real-time log of the crew execution by setting `output_log_file` to `True` (bool) or to a file name (str). Events can be logged to both `file_name.txt` and `file_name.json`.
+If set to `True`, logs will be saved as `logs.txt`.
+
+If `output_log_file` is set to `False` or `None`, no logs are saved.
+
+```python Code
+# Save crew logs
+crew = Crew(output_log_file=True)             # Logs will be saved as logs.txt
+crew = Crew(output_log_file="file_name")      # Logs will be saved as file_name.txt
+crew = Crew(output_log_file="file_name.txt")  # Logs will be saved as file_name.txt
+crew = Crew(output_log_file="file_name.json") # Logs will be saved as file_name.json
+```
+
+
+
 ## Memory Utilization
 
 Crews can utilize memory (short-term, long-term, and entity memory) to enhance their execution and learning over time. This feature allows crews to store and recall execution memories, aiding in decision-making and task execution strategies.
@@ -279,9 +296,9 @@ print(result)
 Once your crew is assembled, initiate the workflow with the appropriate kickoff method. CrewAI provides several methods for better control over the kickoff process: `kickoff()`, `kickoff_for_each()`, `kickoff_async()`, and `kickoff_for_each_async()`.
 
 - `kickoff()`: Starts the execution process according to the defined process flow.
-- `kickoff_for_each()`: Executes tasks for each agent individually.
+- `kickoff_for_each()`: Executes tasks sequentially for each provided input event or item in the collection.
 - `kickoff_async()`: Initiates the workflow asynchronously.
-- `kickoff_for_each_async()`: Executes tasks for each agent individually in an asynchronous manner.
+- `kickoff_for_each_async()`: Executes tasks concurrently for each provided input event or item, leveraging asynchronous processing.
 
 ```python Code
 # Start the crew's task execution
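A minimal sketch of what the reworded bullets describe, assuming a crew object named `my_crew` whose task descriptions contain a `{topic}` placeholder; `kickoff_for_each()` runs one full crew execution per input dict:

```python
# Hypothetical inputs; each dict fills the placeholders for one run
inputs_list = [
    {"topic": "AI in healthcare"},
    {"topic": "AI in finance"},
]

# Sequential: one complete crew run per input, in order
results = my_crew.kickoff_for_each(inputs=inputs_list)

# Concurrent: the async variant awaits all runs together
# results = await my_crew.kickoff_for_each_async(inputs=inputs_list)
```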
Flows documentation:

@@ -35,6 +35,8 @@ class ExampleFlow(Flow):
     @start()
     def generate_city(self):
         print("Starting flow")
+        # Each flow state automatically gets a unique ID
+        print(f"Flow State ID: {self.state['id']}")
 
         response = completion(
             model=self.model,
@@ -47,6 +49,8 @@ class ExampleFlow(Flow):
         )
 
         random_city = response["choices"][0]["message"]["content"]
+        # Store the city in our state
+        self.state["city"] = random_city
         print(f"Random City: {random_city}")
 
         return random_city
@@ -64,6 +68,8 @@ class ExampleFlow(Flow):
         )
 
         fun_fact = response["choices"][0]["message"]["content"]
+        # Store the fun fact in our state
+        self.state["fun_fact"] = fun_fact
         return fun_fact
 
 
@@ -76,7 +82,15 @@ print(f"Generated fun fact: {result}")
 
 In the above example, we have created a simple Flow that generates a random city using OpenAI and then generates a fun fact about that city. The Flow consists of two tasks: `generate_city` and `generate_fun_fact`. The `generate_city` task is the starting point of the Flow, and the `generate_fun_fact` task listens for the output of the `generate_city` task.
 
-When you run the Flow, it will generate a random city and then generate a fun fact about that city. The output will be printed to the console.
+Each Flow instance automatically receives a unique identifier (UUID) in its state, which helps track and manage flow executions. The state can also store additional data (like the generated city and fun fact) that persists throughout the flow's execution.
+
+When you run the Flow, it will:
+1. Generate a unique ID for the flow state
+2. Generate a random city and store it in the state
+3. Generate a fun fact about that city and store it in the state
+4. Print the results to the console
+
+The state's unique ID and stored data can be useful for tracking flow executions and maintaining context between tasks.
 
 **Note:** Ensure you have set up your `.env` file to store your `OPENAI_API_KEY`. This key is necessary for authenticating requests to the OpenAI API.
 
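The numbered steps can be checked directly after a run; a small sketch, assuming the `ExampleFlow` class defined in this hunk and the same `OPENAI_API_KEY` setup:

```python
flow = ExampleFlow()
result = flow.kickoff()  # returns the last method's output (the fun fact)

# The unstructured state keeps the auto-generated ID plus everything stored
print(flow.state["id"])        # unique UUID for this execution
print(flow.state["city"])      # stored by generate_city
print(flow.state["fun_fact"])  # stored by generate_fun_fact
```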
@@ -138,7 +152,7 @@ print("---- Final Output ----")
 print(final_output)
 ````
 
-``` text Output
+```text Output
 ---- Final Output ----
 Second method received: Output from first_method
 ````
@@ -207,34 +221,39 @@ allowing developers to choose the approach that best fits their application's ne
 
 In unstructured state management, all state is stored in the `state` attribute of the `Flow` class.
 This approach offers flexibility, enabling developers to add or modify state attributes on the fly without defining a strict schema.
+Even with unstructured states, CrewAI Flows automatically generates and maintains a unique identifier (UUID) for each state instance.
 
 ```python Code
 from crewai.flow.flow import Flow, listen, start
 
-class UntructuredExampleFlow(Flow):
+class UnstructuredExampleFlow(Flow):
 
     @start()
     def first_method(self):
-        self.state.message = "Hello from structured flow"
-        self.state.counter = 0
+        # The state automatically includes an 'id' field
+        print(f"State ID: {self.state['id']}")
+        self.state['counter'] = 0
+        self.state['message'] = "Hello from structured flow"
 
     @listen(first_method)
     def second_method(self):
-        self.state.counter += 1
-        self.state.message += " - updated"
+        self.state['counter'] += 1
+        self.state['message'] += " - updated"
 
     @listen(second_method)
     def third_method(self):
-        self.state.counter += 1
-        self.state.message += " - updated again"
+        self.state['counter'] += 1
+        self.state['message'] += " - updated again"
 
         print(f"State after third_method: {self.state}")
 
 
-flow = UntructuredExampleFlow()
+flow = UnstructuredExampleFlow()
 flow.kickoff()
 ```
 
+**Note:** The `id` field is automatically generated and preserved throughout the flow's execution. You don't need to manage or set it manually, and it will be maintained even when updating the state with new data.
+
 **Key Points:**
 
 - **Flexibility:** You can dynamically add attributes to `self.state` without predefined constraints.
@@ -245,12 +264,15 @@ flow.kickoff()
 Structured state management leverages predefined schemas to ensure consistency and type safety across the workflow.
 By using models like Pydantic's `BaseModel`, developers can define the exact shape of the state, enabling better validation and auto-completion in development environments.
 
+Each state in CrewAI Flows automatically receives a unique identifier (UUID) to help track and manage state instances. This ID is automatically generated and managed by the Flow system.
+
 ```python Code
 from crewai.flow.flow import Flow, listen, start
 from pydantic import BaseModel
 
 
 class ExampleState(BaseModel):
+    # Note: 'id' field is automatically added to all states
     counter: int = 0
     message: str = ""
 
@@ -259,6 +281,8 @@ class StructuredExampleFlow(Flow[ExampleState]):
 
     @start()
     def first_method(self):
+        # Access the auto-generated ID if needed
+        print(f"State ID: {self.state.id}")
         self.state.message = "Hello from structured flow"
 
     @listen(first_method)
@@ -299,6 +323,91 @@ flow.kickoff()
 
 By providing both unstructured and structured state management options, CrewAI Flows empowers developers to build AI workflows that are both flexible and robust, catering to a wide range of application requirements.
 
+## Flow Persistence
+
+The @persist decorator enables automatic state persistence in CrewAI Flows, allowing you to maintain flow state across restarts or different workflow executions. This decorator can be applied at either the class level or method level, providing flexibility in how you manage state persistence.
+
+### Class-Level Persistence
+
+When applied at the class level, the @persist decorator automatically persists all flow method states:
+
+```python
+@persist  # Using SQLiteFlowPersistence by default
+class MyFlow(Flow[MyState]):
+    @start()
+    def initialize_flow(self):
+        # This method will automatically have its state persisted
+        self.state.counter = 1
+        print("Initialized flow. State ID:", self.state.id)
+
+    @listen(initialize_flow)
+    def next_step(self):
+        # The state (including self.state.id) is automatically reloaded
+        self.state.counter += 1
+        print("Flow state is persisted. Counter:", self.state.counter)
+```
+
+### Method-Level Persistence
+
+For more granular control, you can apply @persist to specific methods:
+
+```python
+class AnotherFlow(Flow[dict]):
+    @persist  # Persists only this method's state
+    @start()
+    def begin(self):
+        if "runs" not in self.state:
+            self.state["runs"] = 0
+        self.state["runs"] += 1
+        print("Method-level persisted runs:", self.state["runs"])
+```
+
+### How It Works
+
+1. **Unique State Identification**
+   - Each flow state automatically receives a unique UUID
+   - The ID is preserved across state updates and method calls
+   - Supports both structured (Pydantic BaseModel) and unstructured (dictionary) states
+
+2. **Default SQLite Backend**
+   - SQLiteFlowPersistence is the default storage backend
+   - States are automatically saved to a local SQLite database
+   - Robust error handling ensures clear messages if database operations fail
+
+3. **Error Handling**
+   - Comprehensive error messages for database operations
+   - Automatic state validation during save and load
+   - Clear feedback when persistence operations encounter issues
+
+### Important Considerations
+
+- **State Types**: Both structured (Pydantic BaseModel) and unstructured (dictionary) states are supported
+- **Automatic ID**: The `id` field is automatically added if not present
+- **State Recovery**: Failed or restarted flows can automatically reload their previous state
+- **Custom Implementation**: You can provide your own FlowPersistence implementation for specialized storage needs (a sketch follows this hunk)
+
+### Technical Advantages
+
+1. **Precise Control Through Low-Level Access**
+   - Direct access to persistence operations for advanced use cases
+   - Fine-grained control via method-level persistence decorators
+   - Built-in state inspection and debugging capabilities
+   - Full visibility into state changes and persistence operations
+
+2. **Enhanced Reliability**
+   - Automatic state recovery after system failures or restarts
+   - Transaction-based state updates for data integrity
+   - Comprehensive error handling with clear error messages
+   - Robust validation during state save and load operations
+
+3. **Extensible Architecture**
+   - Customizable persistence backend through FlowPersistence interface
+   - Support for specialized storage solutions beyond SQLite
+   - Compatible with both structured (Pydantic) and unstructured (dict) states
+   - Seamless integration with existing CrewAI flow patterns
+
+The persistence system's architecture emphasizes technical precision and customization options, allowing developers to maintain full control over state management while benefiting from built-in reliability features.
+
 ## Flow Control
 
 ### Conditional Logic: `or`
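The "Custom Implementation" bullet in the hunk above references the FlowPersistence interface without showing one. A hedged sketch of an in-memory backend; the import path and the `init_db`/`save_state`/`load_state` method names are assumptions inferred from this section's description of the SQLite backend, so verify them against the FlowPersistence base class in your installed crewai version:

```python
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel

from crewai.flow.persistence.base import FlowPersistence  # assumed path


class InMemoryPersistence(FlowPersistence):
    """Keeps flow states in a process-local dict; handy for tests."""

    def __init__(self) -> None:
        self.states: Dict[str, Dict[str, Any]] = {}

    def init_db(self) -> None:
        pass  # nothing to set up for an in-process store

    def save_state(
        self,
        flow_uuid: str,
        method_name: str,
        state_data: Union[Dict[str, Any], BaseModel],
    ) -> None:
        # Normalize structured (BaseModel) and unstructured (dict) states
        data = state_data.model_dump() if isinstance(state_data, BaseModel) else dict(state_data)
        self.states[flow_uuid] = data

    def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
        return self.states.get(flow_uuid)
```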
@@ -628,4 +737,4 @@ Also, check out our YouTube video on how to use flows in CrewAI below!
   allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
   referrerpolicy="strict-origin-when-cross-origin"
   allowfullscreen
 ></iframe>
Knowledge documentation:

@@ -4,8 +4,6 @@ description: What is knowledge in CrewAI and how to use it.
 icon: book
 ---
 
-# Using Knowledge in CrewAI
-
 ## What is Knowledge?
 
 Knowledge in CrewAI is a powerful system that allows AI agents to access and utilize external information sources during their tasks.
@@ -36,7 +34,20 @@ CrewAI supports various types of knowledge sources out of the box:
 </Card>
 </CardGroup>
 
-## Quick Start
+## Supported Knowledge Parameters
+
+| Parameter | Type | Required | Description |
+| :--- | :--- | :--- | :--- |
+| `sources` | **List[BaseKnowledgeSource]** | Yes | List of knowledge sources that provide content to be stored and queried. Can include PDF, CSV, Excel, JSON, text files, or string content. |
+| `collection_name` | **str** | No | Name of the collection where the knowledge will be stored. Used to identify different sets of knowledge. Defaults to "knowledge" if not provided. |
+| `storage` | **Optional[KnowledgeStorage]** | No | Custom storage configuration for managing how the knowledge is stored and retrieved. If not provided, a default storage will be created. |
+
+## Quickstart Example
+
+<Tip>
+For file-based knowledge sources, make sure to place your files in a `knowledge` directory at the root of your project.
+Also, use relative paths from the `knowledge` directory when creating the source.
+</Tip>
 
 Here's an example using string-based knowledge:
 
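The parameter table above maps directly onto a constructor. A hedged sketch, assuming the class is importable as `crewai.knowledge.knowledge.Knowledge` by analogy with the `crewai.knowledge.source.*` imports shown throughout this comparison; verify the path and required parameters against your installed version:

```python
from crewai.knowledge.knowledge import Knowledge  # assumed path
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# sources is the only required parameter listed in the table above
kb = Knowledge(
    collection_name="product_faq",  # defaults to "knowledge" when omitted
    sources=[StringKnowledgeSource(content="Returns are accepted within 30 days.")],
    # storage=...                   # optional custom KnowledgeStorage
)
```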
@@ -80,7 +91,14 @@ result = crew.kickoff(inputs={"question": "What city does John live in and how o
 ```
 
 
-Here's another example with the `CrewDoclingSource`
+Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is quite versatile and can handle multiple file formats including TXT, PDF, DOCX, HTML, and more.
+
+<Note>
+You need to install `docling` for the following example to work: `uv add docling`
+</Note>
+
+
 
 ```python Code
 from crewai import LLM, Agent, Crew, Process, Task
 from crewai.knowledge.source.crew_docling_source import CrewDoclingSource
@@ -128,39 +146,225 @@ result = crew.kickoff(
 )
 ```
 
+## More Examples
+
+Here are examples of how to use different types of knowledge sources:
+
+### Text File Knowledge Source
+```python
+from crewai.knowledge.source.crew_docling_source import CrewDoclingSource
+
+# Create a text file knowledge source
+text_source = CrewDoclingSource(
+    file_paths=["document.txt", "another.txt"]
+)
+
+# Create crew with text file source on agents or crew level
+agent = Agent(
+    ...
+    knowledge_sources=[text_source]
+)
+
+crew = Crew(
+    ...
+    knowledge_sources=[text_source]
+)
+```
+
+### PDF Knowledge Source
+```python
+from crewai.knowledge.source.pdf_knowledge_source import PDFKnowledgeSource
+
+# Create a PDF knowledge source
+pdf_source = PDFKnowledgeSource(
+    file_paths=["document.pdf", "another.pdf"]
+)
+
+# Create crew with PDF knowledge source on agents or crew level
+agent = Agent(
+    ...
+    knowledge_sources=[pdf_source]
+)
+
+crew = Crew(
+    ...
+    knowledge_sources=[pdf_source]
+)
+```
+
+### CSV Knowledge Source
+```python
+from crewai.knowledge.source.csv_knowledge_source import CSVKnowledgeSource
+
+# Create a CSV knowledge source
+csv_source = CSVKnowledgeSource(
+    file_paths=["data.csv"]
+)
+
+# Create crew with CSV knowledge source on agents or crew level
+agent = Agent(
+    ...
+    knowledge_sources=[csv_source]
+)
+
+crew = Crew(
+    ...
+    knowledge_sources=[csv_source]
+)
+```
+
+### Excel Knowledge Source
+```python
+from crewai.knowledge.source.excel_knowledge_source import ExcelKnowledgeSource
+
+# Create an Excel knowledge source
+excel_source = ExcelKnowledgeSource(
+    file_paths=["spreadsheet.xlsx"]
+)
+
+# Create crew with Excel knowledge source on agents or crew level
+agent = Agent(
+    ...
+    knowledge_sources=[excel_source]
+)
+
+crew = Crew(
+    ...
+    knowledge_sources=[excel_source]
+)
+```
+
+### JSON Knowledge Source
+```python
+from crewai.knowledge.source.json_knowledge_source import JSONKnowledgeSource
+
+# Create a JSON knowledge source
+json_source = JSONKnowledgeSource(
+    file_paths=["data.json"]
+)
+
+# Create crew with JSON knowledge source on agents or crew level
+agent = Agent(
+    ...
+    knowledge_sources=[json_source]
+)
+
+crew = Crew(
+    ...
+    knowledge_sources=[json_source]
+)
+```
+
 
 ## Knowledge Configuration
 
 ### Chunking Configuration
 
-Control how content is split for processing by setting the chunk size and overlap.
+Knowledge sources automatically chunk content for better processing.
+You can configure chunking behavior in your knowledge sources:
 
-```python Code
-knowledge_source = StringKnowledgeSource(
-    content="Long content...",
-    chunk_size=4000,      # Characters per chunk (default)
-    chunk_overlap=200     # Overlap between chunks (default)
+```python
+from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
+
+source = StringKnowledgeSource(
+    content="Your content here",
+    chunk_size=4000,      # Maximum size of each chunk (default: 4000)
+    chunk_overlap=200     # Overlap between chunks (default: 200)
 )
 ```
 
-## Embedder Configuration
+The chunking configuration helps in:
+- Breaking down large documents into manageable pieces
+- Maintaining context through chunk overlap
+- Optimizing retrieval accuracy
 
-You can also configure the embedder for the knowledge store. This is useful if you want to use a different embedder for the knowledge store than the one used for the agents.
+### Embeddings Configuration
 
-```python Code
-...
+You can also configure the embedder for the knowledge store.
+This is useful if you want to use a different embedder for the knowledge store than the one used for the agents.
+The `embedder` parameter supports various embedding model providers that include:
+- `openai`: OpenAI's embedding models
+- `google`: Google's text embedding models
+- `azure`: Azure OpenAI embeddings
+- `ollama`: Local embeddings with Ollama
+- `vertexai`: Google Cloud VertexAI embeddings
+- `cohere`: Cohere's embedding models
+- `voyageai`: VoyageAI's embedding models
+- `bedrock`: AWS Bedrock embeddings
+- `huggingface`: Hugging Face models
+- `watson`: IBM Watson embeddings
+
+Here's an example of how to configure the embedder for the knowledge store using Google's `text-embedding-004` model:
+<CodeGroup>
+```python Example
+from crewai import Agent, Task, Crew, Process, LLM
+from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
+import os
+
+# Get the GEMINI API key
+GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
+
+# Create a knowledge source
+content = "Users name is John. He is 30 years old and lives in San Francisco."
 string_source = StringKnowledgeSource(
-    content="Users name is John. He is 30 years old and lives in San Francisco.",
+    content=content,
 )
+
+# Create an LLM with a temperature of 0 to ensure deterministic outputs
+gemini_llm = LLM(
+    model="gemini/gemini-1.5-pro-002",
+    api_key=GEMINI_API_KEY,
+    temperature=0,
+)
+
+# Create an agent with the knowledge store
+agent = Agent(
+    role="About User",
+    goal="You know everything about the user.",
+    backstory="""You are a master at understanding people and their preferences.""",
+    verbose=True,
+    allow_delegation=False,
+    llm=gemini_llm,
+    embedder={
+        "provider": "google",
+        "config": {
+            "model": "models/text-embedding-004",
+            "api_key": GEMINI_API_KEY,
+        }
+    }
+)
+
+task = Task(
+    description="Answer the following questions about the user: {question}",
+    expected_output="An answer to the question.",
+    agent=agent,
+)
+
 crew = Crew(
-    ...
+    agents=[agent],
+    tasks=[task],
+    verbose=True,
+    process=Process.sequential,
     knowledge_sources=[string_source],
     embedder={
-        "provider": "openai",
-        "config": {"model": "text-embedding-3-small"},
-    },
+        "provider": "google",
+        "config": {
+            "model": "models/text-embedding-004",
+            "api_key": GEMINI_API_KEY,
+        }
+    }
 )
-```
 
+result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
+```
+```text Output
+# Agent: About User
+## Task: Answer the following questions about the user: What city does John live in and how old is he?
+
+# Agent: About User
+## Final Answer:
+John is 30 years old and lives in San Francisco.
+```
+</CodeGroup>
 ## Clearing Knowledge
 
 If you need to clear the knowledge stored in CrewAI, you can use the `crewai reset-memories` command with the `--knowledge` option.
LLMs documentation:

@@ -38,6 +38,7 @@ Here's a detailed breakdown of supported models and their capabilities, you can
 | GPT-4 | 8,192 tokens | High-accuracy tasks, complex reasoning |
 | GPT-4 Turbo | 128,000 tokens | Long-form content, document analysis |
 | GPT-4o & GPT-4o-mini | 128,000 tokens | Cost-effective large context processing |
+| o3-mini | 200,000 tokens | Fast reasoning, complex reasoning |
 
 <Note>
 1 token ≈ 4 characters in English. For example, 8,192 tokens ≈ 32,768 characters or about 6,000 words.
@@ -146,10 +147,24 @@ Here's a detailed breakdown of supported models and their capabilities, you can
 Groq is known for its fast inference speeds, making it suitable for real-time applications.
 </Tip>
 </Tab>
+<Tab title="SambaNova">
+| Model | Context Window | Best For |
+|-------|---------------|-----------|
+| Llama 3.1 70B/8B | Up to 131,072 tokens | High-performance, large context tasks |
+| Llama 3.1 405B | 8,192 tokens | High-performance and output quality |
+| Llama 3.2 Series | 8,192 tokens | General-purpose tasks, multimodal |
+| Llama 3.3 70B | Up to 131,072 tokens | High-performance and output quality |
+| Qwen2 family | 8,192 tokens | High-performance and output quality |
+
+<Tip>
+[SambaNova](https://cloud.sambanova.ai/) has several models with fast inference speed at full precision.
+</Tip>
+</Tab>
 <Tab title="Others">
 | Provider | Context Window | Key Features |
 |----------|---------------|--------------|
-| Deepseek Chat | 128,000 tokens | Specialized in technical discussions |
+| Deepseek Chat | 64,000 tokens | Specialized in technical discussions |
+| Deepseek R1 | 64,000 tokens | Affordable reasoning model |
 | Claude 3 | Up to 200K tokens | Strong reasoning, code understanding |
 | Gemma Series | 8,192 tokens | Efficient, smaller-scale tasks |
 
@@ -230,6 +245,9 @@ There are three ways to configure LLMs in CrewAI. Choose the method that best fi
 # llm: bedrock/amazon.titan-text-express-v1
 # llm: bedrock/meta.llama2-70b-chat-v1
 
+# Amazon SageMaker Models - Enterprise-grade
+# llm: sagemaker/<my-endpoint>
+
 # Mistral Models - Open source alternative
 # llm: mistral/mistral-large-latest
 # llm: mistral/mistral-medium-latest
@@ -280,6 +298,10 @@ There are three ways to configure LLMs in CrewAI. Choose the method that best fi
 # llm: sambanova/Meta-Llama-3.1-8B-Instruct
 # llm: sambanova/BioMistral-7B
 # llm: sambanova/Falcon-180B
+
+# Open Router Models - Affordable reasoning
+# llm: openrouter/deepseek/deepseek-r1
+# llm: openrouter/deepseek/deepseek-chat
 ```
 
 <Info>
@@ -441,19 +463,36 @@ Learn how to get the most out of your LLM configuration:
 
 <Accordion title="Google">
 ```python Code
-# Option 1. Gemini accessed with an API key.
+# Option 1: Gemini accessed with an API key.
 # https://ai.google.dev/gemini-api/docs/api-key
 GEMINI_API_KEY=<your-api-key>
 
-# Option 2. Vertex AI IAM credentials for Gemini, Anthropic, and anything in the Model Garden.
+# Option 2: Vertex AI IAM credentials for Gemini, Anthropic, and Model Garden.
 # https://cloud.google.com/vertex-ai/generative-ai/docs/overview
 ```
 
+Get credentials:
+```python Code
+import json
+
+file_path = 'path/to/vertex_ai_service_account.json'
+
+# Load the JSON file
+with open(file_path, 'r') as file:
+    vertex_credentials = json.load(file)
+
+# Convert the credentials to a JSON string
+vertex_credentials_json = json.dumps(vertex_credentials)
+```
+
 Example usage:
 ```python Code
+from crewai import LLM
+
 llm = LLM(
     model="gemini/gemini-1.5-pro-latest",
-    temperature=0.7
+    temperature=0.7,
+    vertex_credentials=vertex_credentials_json
 )
 ```
 </Accordion>
@@ -493,6 +532,21 @@ Learn how to get the most out of your LLM configuration:
 )
 ```
 </Accordion>
+
+<Accordion title="Amazon SageMaker">
+```python Code
+AWS_ACCESS_KEY_ID=<your-access-key>
+AWS_SECRET_ACCESS_KEY=<your-secret-key>
+AWS_DEFAULT_REGION=<your-region>
+```
+
+Example usage:
+```python Code
+llm = LLM(
+    model="sagemaker/<my-endpoint>"
+)
+```
+</Accordion>
 
 <Accordion title="Mistral">
 ```python Code
@@ -649,8 +703,53 @@ Learn how to get the most out of your LLM configuration:
- Support for long context windows
</Info>
</Accordion>

<Accordion title="Open Router">
```python Code
OPENROUTER_API_KEY=<your-api-key>
```

Example usage:
```python Code
llm = LLM(
    model="openrouter/deepseek/deepseek-r1",
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY
)
```

<Info>
Open Router models:
- openrouter/deepseek/deepseek-r1
- openrouter/deepseek/deepseek-chat
</Info>
</Accordion>
</AccordionGroup>

## Structured LLM Calls

CrewAI supports structured responses from LLM calls by allowing you to define a `response_format` using a Pydantic model. This enables the framework to automatically parse and validate the output, making it easier to integrate the response into your application without manual post-processing.

For example, you can define a Pydantic model to represent the expected response structure and pass it as the `response_format` when instantiating the LLM. The model will then be used to convert the LLM output into a structured Python object.

```python Code
from pydantic import BaseModel

from crewai import LLM

class Dog(BaseModel):
    name: str
    age: int
    breed: str


llm = LLM(model="gpt-4o", response_format=Dog)

response = llm.call(
    "Analyze the following messages and return the name, age, and breed. "
    "Meet Kona! She is 3 years old and is a black german shepherd."
)
print(response)
```
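A hedged usage note: depending on the CrewAI version, `llm.call` may return either the parsed object or the raw JSON text, so a defensive sketch validates the response with the same model:

```python Code
# Minimal sketch (assumption: the response is either a JSON string or a parsed Dog)
dog = Dog.model_validate_json(response) if isinstance(response, str) else response
print(dog.name, dog.age, dog.breed)
```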
## Common Issues and Solutions

<Tabs>
@@ -58,41 +58,107 @@ my_crew = Crew(
### Example: Use Custom Memory Instances, e.g., FAISS as the VectorDB

```python Code
from crewai import Crew, Process
from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
from crewai.memory.storage import LTMSQLiteStorage, RAGStorage
from typing import List, Optional

# Assemble your crew with memory capabilities
my_crew: Crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    # Long-term memory for persistent storage across sessions
    long_term_memory=LongTermMemory(
        storage=LTMSQLiteStorage(
            db_path="/my_crew1/long_term_memory_storage.db"
        )
    ),
    # Short-term memory for current context using RAG
    short_term_memory=ShortTermMemory(
        storage=RAGStorage(
            embedder_config={
                "provider": "openai",
                "config": {
                    "model": 'text-embedding-3-small'
                }
            },
            type="short_term",
            path="/my_crew1/"
        )
    ),
    # Entity memory for tracking key information about entities
    entity_memory=EntityMemory(
        storage=RAGStorage(
            embedder_config={
                "provider": "openai",
                "config": {
                    "model": 'text-embedding-3-small'
                }
            },
            type="entities",
            path="/my_crew1/"
        )
    ),
    verbose=True,
)
```

## Security Considerations

When configuring memory storage:
- Use environment variables for storage paths (e.g., `CREWAI_STORAGE_DIR`)
- Never hardcode sensitive information like database credentials
- Consider access permissions for storage directories
- Use relative paths when possible to maintain portability

Example using environment variables:
```python
import os
from crewai import Crew
from crewai.memory import LongTermMemory
from crewai.memory.storage import LTMSQLiteStorage

# Configure storage path using environment variable
storage_path = os.getenv("CREWAI_STORAGE_DIR", "./storage")
crew = Crew(
    memory=True,
    long_term_memory=LongTermMemory(
        storage=LTMSQLiteStorage(
            db_path="{storage_path}/memory.db".format(storage_path=storage_path)
        )
    )
)
```

## Configuration Examples

### Basic Memory Configuration
```python
from crewai import Crew
from crewai.memory import LongTermMemory

# Simple memory configuration
crew = Crew(memory=True)  # Uses default storage locations
```

### Custom Storage Configuration
```python
from crewai import Crew
from crewai.memory import LongTermMemory
from crewai.memory.storage import LTMSQLiteStorage

# Configure custom storage paths
crew = Crew(
    memory=True,
    long_term_memory=LongTermMemory(
        storage=LTMSQLiteStorage(db_path="./memory.db")
    )
)
```

## Integrating Mem0 for Enhanced User Memory

[Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.
@@ -134,6 +200,23 @@ crew = Crew(
)
```

## Memory Configuration Options
If you want to access a specific organization and project, you can set the `org_id` and `project_id` parameters in the memory configuration.

```python Code
from crewai import Crew

crew = Crew(
    agents=[...],
    tasks=[...],
    verbose=True,
    memory=True,
    memory_config={
        "provider": "mem0",
        "config": {"user_id": "john", "org_id": "my_org_id", "project_id": "my_project_id"},
    },
)
```

## Additional Embedding Providers
@@ -168,7 +251,12 @@ my_crew = Crew(
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "openai",
        "config": {
            "model": 'text-embedding-3-small'
        }
    }
)
```
@@ -207,7 +295,7 @@ my_crew = Crew(
        "provider": "google",
        "config": {
            "api_key": "<YOUR_API_KEY>",
            "model": "<model_name>"
        }
    }
)
@@ -225,13 +313,15 @@ my_crew = Crew(
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "openai",
        "config": {
            "api_key": "YOUR_API_KEY",
            "api_base": "YOUR_API_BASE_PATH",
            "api_version": "YOUR_API_VERSION",
            "model_name": 'text-embedding-3-small'
        }
    }
)
```
@@ -247,12 +337,15 @@ my_crew = Crew(
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "vertexai",
        "config": {
            "project_id": "YOUR_PROJECT_ID",
            "region": "YOUR_REGION",
            "api_key": "YOUR_API_KEY",
            "model_name": "textembedding-gecko"
        }
    }
)
```
@@ -271,7 +364,27 @@ my_crew = Crew(
        "provider": "cohere",
        "config": {
            "api_key": "YOUR_API_KEY",
            "model": "<model_name>"
        }
    }
)
```

### Using VoyageAI embeddings

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "voyageai",
        "config": {
            "api_key": "YOUR_API_KEY",
            "model": "<model_name>"
        }
    }
)
@@ -321,6 +434,33 @@ my_crew = Crew(
)
```

### Adding Custom Embedding Function

```python Code
from crewai import Crew, Agent, Task, Process
from chromadb import Documents, EmbeddingFunction, Embeddings

# Create a custom embedding function
class CustomEmbedder(EmbeddingFunction):
    def __call__(self, input: Documents) -> Embeddings:
        # generate embeddings
        return [1, 2, 3]  # this is a dummy embedding

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "custom",
        "config": {
            "embedder": CustomEmbedder()
        }
    }
)
```

### Resetting Memory

```shell
@@ -31,7 +31,7 @@ From this point on, your crew will have planning enabled, and the tasks will be
#### Planning LLM

Now you can define the LLM that will be used to plan the tasks.

When running the base case example, you will see something like the output below, which represents the output of the `AgentPlanner`
responsible for creating the step-by-step logic to add to the Agents' tasks.
@@ -39,7 +39,6 @@ responsible for creating the step-by-step logic to add to the Agents' tasks.
<CodeGroup>
```python Code
from crewai import Crew, Agent, Task, Process

# Assemble your crew with planning capabilities and custom LLM
my_crew = Crew(
@@ -47,7 +46,7 @@ my_crew = Crew(
    tasks=self.tasks,
    process=Process.sequential,
    planning=True,
    planning_llm="gpt-4o"
)

# Run the crew
@@ -82,8 +81,8 @@ my_crew.kickoff()
3. **Collect Data:**

   - Search for the latest papers, articles, and reports published in 2024 and early 2025.
   - Use keywords like "Large Language Models 2025", "AI LLM advancements", "AI ethics 2025", etc.

4. **Analyze Findings:**
@@ -23,9 +23,7 @@ Processes enable individual agents to operate as a cohesive unit, streamlining t
To assign a process to a crew, specify the process type upon crew creation to set the execution strategy. For a hierarchical process, be sure to define `manager_llm` or `manager_agent` for the manager agent.

```python
from crewai import Crew, Process

# Example: Creating a crew with a sequential process
crew = Crew(
@@ -40,7 +38,7 @@ crew = Crew(
    agents=my_agents,
    tasks=my_tasks,
    process=Process.hierarchical,
    manager_llm="gpt-4o"
    # or
    # manager_agent=my_manager_agent
)
@@ -33,11 +33,12 @@ crew = Crew(
| :------------------------------- | :---------------- | :---------------------------- | :------------------------------------------------------------------------------------------- |
| **Description**                  | `description`     | `str`                          | A clear, concise statement of what the task entails.                                          |
| **Expected Output**              | `expected_output` | `str`                          | A detailed description of what the task's completion looks like.                              |
| **Name** _(optional)_            | `name`            | `Optional[str]`                | A name identifier for the task.                                                               |
| **Agent** _(optional)_           | `agent`           | `Optional[BaseAgent]`          | The agent responsible for executing the task.                                                 |
| **Tools** _(optional)_           | `tools`           | `List[BaseTool]`               | The tools/resources the agent is limited to use for this task.                                |
| **Context** _(optional)_         | `context`         | `Optional[List["Task"]]`       | Other tasks whose outputs will be used as context for this task.                              |
| **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]`               | Whether the task should be executed asynchronously. Defaults to False.                        |
| **Human Input** _(optional)_     | `human_input`     | `Optional[bool]`               | Whether the task should have a human review the final answer of the agent. Defaults to False. |
| **Config** _(optional)_          | `config`          | `Optional[Dict[str, Any]]`     | Task-specific configuration parameters.                                                       |
| **Output File** _(optional)_     | `output_file`     | `Optional[str]`                | File path for storing the task output.                                                        |
| **Output JSON** _(optional)_     | `output_json`     | `Optional[Type[BaseModel]]`    | A Pydantic model to structure the JSON output.                                                |
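Putting several of these attributes together, a minimal sketch (the strings and the `researcher` agent are illustrative placeholders, not from the docs):

```python Code
from crewai import Task

report_task = Task(
    description="Summarize this week's research findings",  # required
    expected_output="A one-page markdown summary",           # required
    agent=researcher,              # assumes an Agent defined elsewhere
    human_input=True,              # pause for human review of the final answer
    output_file="output/summary.md",
)
```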
@@ -68,7 +69,7 @@ research_task:
  description: >
    Conduct a thorough research about {topic}
    Make sure you find any interesting and relevant information given
    the current year is 2025.
  expected_output: >
    A list with 10 bullet points of the most relevant information about {topic}
  agent: researcher
@@ -154,7 +155,7 @@ research_task = Task(
    description="""
        Conduct a thorough research about AI Agents.
        Make sure you find any interesting and relevant information given
        the current year is 2025.
    """,
    expected_output="""
        A list with 10 bullet points of the most relevant information about AI Agents
@@ -150,15 +150,20 @@ There are two main ways for one to create a CrewAI tool:
```python Code
from typing import Type

from crewai.tools import BaseTool
from pydantic import BaseModel, Field

class MyToolInput(BaseModel):
    """Input schema for MyCustomTool."""
    argument: str = Field(..., description="Description of the argument.")

class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = "What this tool does. It's vital for effective utilization."
    args_schema: Type[BaseModel] = MyToolInput

    def _run(self, argument: str) -> str:
        # Your tool's logic here
        return "Tool's result"
```
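To see the subclass in action, hand an instance to an agent. A minimal sketch (the role, goal, and backstory strings are placeholders):

```python Code
from crewai import Agent

analyst = Agent(
    role="Analyst",
    goal="Answer questions using the custom tool",
    backstory="An analyst who leans on custom tooling.",
    tools=[MyCustomTool()],  # the BaseTool subclass defined above
)
```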

### Utilizing the `tool` Decorator
@@ -73,9 +73,9 @@ result = crew.kickoff()
If you're using the hierarchical process and don't want to set a custom manager agent, you can specify the language model for the manager:

```python Code
from crewai import LLM

manager_llm = LLM(model="gpt-4o")

crew = Crew(
    agents=[researcher, writer],
@@ -60,12 +60,12 @@ writer = Agent(
# Create tasks for your agents
task1 = Task(
    description=(
        "Conduct a comprehensive analysis of the latest advancements in AI in 2025. "
        "Identify key trends, breakthrough technologies, and potential industry impacts. "
        "Compile your findings in a detailed report. "
        "Make sure to check with a human if the draft is good before finalizing your answer."
    ),
    expected_output='A comprehensive full report on the latest AI advancements in 2025, leave nothing out',
    agent=researcher,
    human_input=True
)
@@ -76,7 +76,7 @@ task2 = Task(
        "Your post should be informative yet accessible, catering to a tech-savvy audience. "
        "Aim for a narrative that captures the essence of these breakthroughs and their implications for the future."
    ),
    expected_output='A compelling 3 paragraphs blog post formatted as markdown about the latest AI advancements in 2025',
    agent=writer,
    human_input=True
)
@@ -23,6 +23,7 @@ LiteLLM supports a wide range of providers, including but not limited to:
- Azure OpenAI
- AWS (Bedrock, SageMaker)
- Cohere
- VoyageAI
- Hugging Face
- Ollama
- Mistral AI
@@ -32,6 +33,7 @@ LiteLLM supports a wide range of providers, including but not limited to:
- Cloudflare Workers AI
- DeepInfra
- Groq
- SambaNova
- [NVIDIA NIMs](https://docs.api.nvidia.com/nim/reference/models-1)
- And many more!
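Since every provider above is reached through the same `LLM` interface, switching usually comes down to the provider prefix on the model string. A small sketch with illustrative model names (check each provider's catalog for current identifiers):

```python Code
from crewai import LLM

openai_llm = LLM(model="gpt-4o")
groq_llm = LLM(model="groq/llama-3.1-70b-versatile")
ollama_llm = LLM(model="ollama/llama3", base_url="http://localhost:11434")
```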
docs/how-to/mlflow-observability.mdx (new file, 206 lines)
@@ -0,0 +1,206 @@
---
title: Agent Monitoring with MLflow
description: Quickly start monitoring your Agents with MLflow.
icon: bars-staggered
---

# MLflow Overview

[MLflow](https://mlflow.org/) is an open-source platform to assist machine learning practitioners and teams in handling the complexities of the machine learning process.

It provides a tracing feature that enhances LLM observability in your Generative AI applications by capturing detailed information about the execution of your application's services.
Tracing provides a way to record the inputs, outputs, and metadata associated with each intermediate step of a request, enabling you to easily pinpoint the source of bugs and unexpected behaviors.

![Overview of MLflow crewAI Tracing usage](/images/mlflow-tracing.gif)

### Features

- **Tracing Dashboard**: Monitor activities of your crewAI agents with detailed dashboards that include inputs, outputs and metadata of spans.
- **Automated Tracing**: A fully automated integration with crewAI, which can be enabled by running `mlflow.crewai.autolog()`.
- **Manual Trace Instrumentation with minimal effort**: Customize trace instrumentation through MLflow's high-level fluent APIs such as decorators, function wrappers and context managers.
- **OpenTelemetry Compatibility**: MLflow Tracing supports exporting traces to an OpenTelemetry Collector, which can then be used to export traces to various backends such as Jaeger, Zipkin, and AWS X-Ray.
- **Package and Deploy Agents**: Package and deploy your crewAI agents to an inference server with a variety of deployment targets.
- **Securely Host LLMs**: Host multiple LLMs from various providers in one unified endpoint through the MLflow gateway.
- **Evaluation**: Evaluate your crewAI agents with a wide range of metrics using a convenient API `mlflow.evaluate()`.

## Setup Instructions

<Steps>
<Step title="Install MLflow package">
```shell
# The crewAI integration is available in mlflow>=2.19.0
pip install mlflow
```
</Step>
<Step title="Start MLflow tracking server">
```shell
# This process is optional, but it is recommended to use the MLflow tracking server for better visualization and broader features.
mlflow server
```
</Step>
<Step title="Initialize MLflow in Your Application">
Add the following two lines to your application code:

```python
import mlflow

mlflow.crewai.autolog()

# Optional: Set a tracking URI and an experiment name if you have a tracking server
mlflow.set_tracking_uri("http://localhost:5000")
mlflow.set_experiment("CrewAI")
```

Example Usage for tracing CrewAI Agents:

```python
from crewai import Agent, Crew, Task
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai_tools import SerperDevTool, WebsiteSearchTool

from textwrap import dedent

content = "User's name is John. He is 30 years old and lives in San Francisco."
string_source = StringKnowledgeSource(
    content=content, metadata={"preference": "personal"}
)

search_tool = WebsiteSearchTool()


class TripAgents:
    def city_selection_agent(self):
        return Agent(
            role="City Selection Expert",
            goal="Select the best city based on weather, season, and prices",
            backstory="An expert in analyzing travel data to pick ideal destinations",
            tools=[
                search_tool,
            ],
            verbose=True,
        )

    def local_expert(self):
        return Agent(
            role="Local Expert at this city",
            goal="Provide the BEST insights about the selected city",
            backstory="""A knowledgeable local guide with extensive information
        about the city, its attractions and customs""",
            tools=[search_tool],
            verbose=True,
        )


class TripTasks:
    def identify_task(self, agent, origin, cities, interests, range):
        return Task(
            description=dedent(
                f"""
                Analyze and select the best city for the trip based
                on specific criteria such as weather patterns, seasonal
                events, and travel costs. This task involves comparing
                multiple cities, considering factors like current weather
                conditions, upcoming cultural or seasonal events, and
                overall travel expenses.
                Your final answer must be a detailed
                report on the chosen city, and everything you found out
                about it, including the actual flight costs, weather
                forecast and attractions.

                Traveling from: {origin}
                City Options: {cities}
                Trip Date: {range}
                Traveler Interests: {interests}
                """
            ),
            agent=agent,
            expected_output="Detailed report on the chosen city including flight costs, weather forecast, and attractions",
        )

    def gather_task(self, agent, origin, interests, range):
        return Task(
            description=dedent(
                f"""
                As a local expert on this city you must compile an
                in-depth guide for someone traveling there and wanting
                to have THE BEST trip ever!
                Gather information about key attractions, local customs,
                special events, and daily activity recommendations.
                Find the best spots to go to, the kind of place only a
                local would know.
                This guide should provide a thorough overview of what
                the city has to offer, including hidden gems, cultural
                hotspots, must-visit landmarks, weather forecasts, and
                high level costs.
                The final answer must be a comprehensive city guide,
                rich in cultural insights and practical tips,
                tailored to enhance the travel experience.

                Trip Date: {range}
                Traveling from: {origin}
                Traveler Interests: {interests}
                """
            ),
            agent=agent,
            expected_output="Comprehensive city guide including hidden gems, cultural hotspots, and practical travel tips",
        )


class TripCrew:
    def __init__(self, origin, cities, date_range, interests):
        self.cities = cities
        self.origin = origin
        self.interests = interests
        self.date_range = date_range

    def run(self):
        agents = TripAgents()
        tasks = TripTasks()

        city_selector_agent = agents.city_selection_agent()
        local_expert_agent = agents.local_expert()

        identify_task = tasks.identify_task(
            city_selector_agent,
            self.origin,
            self.cities,
            self.interests,
            self.date_range,
        )
        gather_task = tasks.gather_task(
            local_expert_agent, self.origin, self.interests, self.date_range
        )

        crew = Crew(
            agents=[city_selector_agent, local_expert_agent],
            tasks=[identify_task, gather_task],
            verbose=True,
            memory=True,
            knowledge={
                "sources": [string_source],
                "metadata": {"preference": "personal"},
            },
        )

        result = crew.kickoff()
        return result


trip_crew = TripCrew("California", "Tokyo", "Dec 12 - Dec 20", "sports")
result = trip_crew.run()

print(result)
```
Refer to [MLflow Tracing Documentation](https://mlflow.org/docs/latest/llms/tracing/index.html) for more configurations and use cases.
</Step>
<Step title="Visualize Activities of Agents">
Now traces for your crewAI agents are captured by MLflow.
Let's visit the MLflow tracking server to view the traces and get insights into your Agents.

Open `127.0.0.1:5000` in your browser to visit the MLflow tracking server.
<Frame caption="MLflow Tracing Dashboard">
    <img src="/images/mlflow1.png" alt="MLflow tracing example with crewai" />
</Frame>
</Step>
</Steps>
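The features list above mentions OpenTelemetry compatibility without showing it. As a hedged sketch only (the exact variable name depends on your MLflow version; MLflow's tracing docs describe an OTLP traces endpoint setting):

```python
import os
import mlflow

# Assumption: MLflow Tracing honors this standard OTLP environment variable
os.environ["OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"] = "http://localhost:4317"

mlflow.crewai.autolog()  # spans are then exported to the OpenTelemetry Collector
```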
@@ -1,14 +1,14 @@
---
title: Using Multimodal Agents
description: Learn how to enable and use multimodal capabilities in your agents for processing images and other non-text content within the CrewAI framework.
icon: video
---

## Using Multimodal Agents

CrewAI supports multimodal agents that can process both text and non-text content like images. This guide will show you how to enable and use multimodal capabilities in your agents.

### Enabling Multimodal Capabilities

To create a multimodal agent, simply set the `multimodal` parameter to `True` when initializing your agent:
@@ -25,7 +25,7 @@ agent = Agent(
When you set `multimodal=True`, the agent is automatically configured with the necessary tools for handling non-text content, including the `AddImageTool`.

### Working with Images

The multimodal agent comes pre-configured with the `AddImageTool`, which allows it to process images. You don't need to manually add this tool - it's automatically included when you enable multimodal capabilities.
@@ -45,6 +45,7 @@ image_analyst = Agent(
# Create a task for image analysis
task = Task(
    description="Analyze the product image at https://example.com/product.jpg and provide a detailed description",
    expected_output="A detailed description of the product image",
    agent=image_analyst
)
@@ -81,6 +82,7 @@ inspection_task = Task(
    3. Compliance with standards
    Provide a detailed report highlighting any issues found.
    """,
    expected_output="A detailed report highlighting any issues found",
    agent=expert_analyst
)
@@ -108,7 +110,7 @@ The multimodal agent will automatically handle the image processing through its
- Process image content with optional context or specific questions
- Provide analysis and insights based on the visual information and task requirements

### Best Practices

When working with multimodal agents, keep these best practices in mind:
docs/how-to/portkey-observability.mdx (new file, 202 lines)
@@ -0,0 +1,202 @@
---
title: Portkey Observability and Guardrails
description: How to use Portkey with CrewAI
icon: key
---

<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-CrewAI.png" alt="Portkey CrewAI Header Image" width="70%" />

[Portkey](https://portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) is a 2-line upgrade to make your CrewAI agents reliable, cost-efficient, and fast.

Portkey adds 4 core production capabilities to any CrewAI agent:
1. Routing to **200+ LLMs**
2. Making each LLM call more robust
3. Full-stack tracing & cost, performance analytics
4. Real-time guardrails to enforce behavior

## Getting Started

<Steps>
<Step title="Install CrewAI and Portkey">
```bash
pip install -qU crewai portkey-ai
```
</Step>
<Step title="Configure the LLM Client">
To build CrewAI Agents with Portkey, you'll need two keys:
- **Portkey API Key**: Sign up on the [Portkey app](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) and copy your API key
- **Virtual Key**: Virtual Keys securely manage your LLM API keys in one place. Store your LLM provider API keys securely in Portkey's vault

```python
from crewai import LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

gpt_llm = LLM(
    model="gpt-4",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",  # We are using a Virtual key
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_VIRTUAL_KEY",  # Enter your Virtual key from Portkey
    )
)
```
</Step>
<Step title="Create and Run Your First Agent">
```python
from crewai import Agent, Task, Crew

# Define your agents with roles and goals
coder = Agent(
    role='Software developer',
    goal='Write clear, concise code on demand',
    backstory='An expert coder with a keen eye for software trends.',
    llm=gpt_llm
)

# Create tasks for your agents
task1 = Task(
    description="Define the HTML for making a simple website with heading- Hello World! Portkey is working!",
    expected_output="A clear and concise HTML code",
    agent=coder
)

# Instantiate your crew
crew = Crew(
    agents=[coder],
    tasks=[task1],
)

result = crew.kickoff()
print(result)
```
</Step>
</Steps>

## Key Features

| Feature | Description |
|:--------|:------------|
| 🌐 Multi-LLM Support | Access OpenAI, Anthropic, Gemini, Azure, and 250+ providers through a unified interface |
| 🛡️ Production Reliability | Implement retries, timeouts, load balancing, and fallbacks |
| 📊 Advanced Observability | Track 40+ metrics including costs, tokens, latency, and custom metadata |
| 🔍 Comprehensive Logging | Debug with detailed execution traces and function call logs |
| 🚧 Security Controls | Set budget limits and implement role-based access control |
| 🔄 Performance Analytics | Capture and analyze feedback for continuous improvement |
| 💾 Intelligent Caching | Reduce costs and latency with semantic or simple caching |

## Production Features with Portkey Configs

All features mentioned below are through Portkey's Config system. Portkey's Config system allows you to define routing strategies using simple JSON objects in your LLM API calls. You can create and manage Configs directly in your code or through the Portkey Dashboard. Each Config has a unique ID for easy reference.

<Frame>
    <img src="https://raw.githubusercontent.com/Portkey-AI/docs-core/refs/heads/main/images/libraries/libraries-3.avif"/>
</Frame>

### 1. Use 250+ LLMs
Access various LLMs like Anthropic, Gemini, Mistral, Azure OpenAI, and more with minimal code changes. Switch between providers or use them together seamlessly. [Learn more about Universal API](https://portkey.ai/docs/product/ai-gateway/universal-api)

Easily switch between different LLM providers:

```python
# Anthropic Configuration
anthropic_llm = LLM(
    model="claude-3-5-sonnet-latest",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_ANTHROPIC_VIRTUAL_KEY",  # You don't need a provider when using Virtual keys
        trace_id="anthropic_agent"
    )
)

# Azure OpenAI Configuration
azure_llm = LLM(
    model="gpt-4",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_AZURE_VIRTUAL_KEY",  # You don't need a provider when using Virtual keys
        trace_id="azure_agent"
    )
)
```

### 2. Caching
Improve response times and reduce costs with two powerful caching modes:
- **Simple Cache**: Perfect for exact matches
- **Semantic Cache**: Matches responses for requests that are semantically similar
[Learn more about Caching](https://portkey.ai/docs/product/ai-gateway/cache-simple-and-semantic)

```py
config = {
    "cache": {
        "mode": "semantic",  # or "simple" for exact matching
    }
}
```

### 3. Production Reliability
Portkey provides comprehensive reliability features:
- **Automatic Retries**: Handle temporary failures gracefully
- **Request Timeouts**: Prevent hanging operations
- **Conditional Routing**: Route requests based on specific conditions
- **Fallbacks**: Set up automatic provider failovers
- **Load Balancing**: Distribute requests efficiently

[Learn more about Reliability Features](https://portkey.ai/docs/product/ai-gateway/)

### 4. Metrics

Agent runs are complex. Portkey automatically logs **40+ comprehensive metrics** for your AI agents, including cost, tokens used, latency, etc. Whether you need a broad overview or granular insights into your agent runs, Portkey's customizable filters provide the metrics you need.

- Cost per agent interaction
- Response times and latency
- Token usage and efficiency
- Success/failure rates
- Cache hit rates

<img src="https://github.com/siddharthsambharia-portkey/Portkey-Product-Images/blob/main/Portkey-Dashboard.png?raw=true" width="70%" alt="Portkey Dashboard" />

### 5. Detailed Logging
Logs are essential for understanding agent behavior, diagnosing issues, and improving performance. They provide a detailed record of agent activities and tool use, which is crucial for debugging and optimizing processes.

Access a dedicated section to view records of agent executions, including parameters, outcomes, function calls, and errors. Filter logs based on multiple parameters such as trace ID, model, tokens used, and metadata.

<details>
<summary><b>Traces</b></summary>
<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Traces.png" alt="Portkey Traces" width="70%" />
</details>

<details>
<summary><b>Logs</b></summary>
<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Logs.png" alt="Portkey Logs" width="70%" />
</details>

### 6. Enterprise Security Features
- Set budget limits and rate limits per Virtual Key (disposable API keys)
- Implement role-based access control
- Track system changes with audit logs
- Configure data retention policies

For detailed information on creating and managing Configs, visit the [Portkey documentation](https://docs.portkey.ai/product/ai-gateway/configs).

## Resources

- [📘 Portkey Documentation](https://docs.portkey.ai)
- [📊 Portkey Dashboard](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai)
- [🐦 Twitter](https://twitter.com/portkeyai)
- [💬 Discord Community](https://discord.gg/DD7vgKK299)
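The page defines a Config dict but stops short of attaching it to a request. A hedged sketch, assuming `createHeaders` forwards a `config` argument (the Config keys and virtual keys below are illustrative):

```python
from crewai import LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

# Illustrative Config combining the caching and reliability options above
config = {
    "cache": {"mode": "simple"},
    "retry": {"attempts": 3},
    "strategy": {"mode": "fallback"},
    "targets": [
        {"virtual_key": "YOUR_OPENAI_VIRTUAL_KEY"},
        {"virtual_key": "YOUR_ANTHROPIC_VIRTUAL_KEY"},
    ],
}

resilient_llm = LLM(
    model="gpt-4o",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",  # the real provider key lives in the Virtual Key
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        config=config,  # assumption: createHeaders accepts a Config object
    ),
)
```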
docs/images/mlflow-tracing.gif (new binary file, 16 MiB; not shown)
docs/images/mlflow1.png (new binary file, 382 KiB; not shown)
@@ -15,10 +15,48 @@ icon: wrench
If you need to update Python, visit [python.org/downloads](https://python.org/downloads)
</Note>

# Setting Up Your Environment

Before installing CrewAI, it's recommended to set up a virtual environment. This helps isolate your project dependencies and avoid conflicts.

<Steps>
<Step title="Create a Virtual Environment">
Choose your preferred method to create a virtual environment:

**Using venv (Python's built-in tool):**
```shell Terminal
python3 -m venv .venv
```

**Using conda:**
```shell Terminal
conda create -n crewai-env python=3.12
```
</Step>

<Step title="Activate the Virtual Environment">
Activate your virtual environment based on your platform:

**On macOS/Linux (venv):**
```shell Terminal
source .venv/bin/activate
```

**On Windows (venv):**
```shell Terminal
.venv\Scripts\activate
```

**Using conda (all platforms):**
```shell Terminal
conda activate crewai-env
```
</Step>
</Steps>

# Installing CrewAI

Now let's get you set up! 🚀

<Steps>
<Step title="Install CrewAI">
@@ -72,9 +110,9 @@ Let's get you set up! 🚀
# Creating a New Project

<Tip>
We recommend using the YAML Template scaffolding for a structured approach to defining agents and tasks.
</Tip>

<Steps>
<Step title="Generate Project Structure">
@@ -104,7 +142,18 @@ Let's get you set up! 🚀
    └── tasks.yaml
    ```
    </Frame>
</Step>

<Step title="Install Additional Tools">
    You can install additional tools using UV:
    ```shell Terminal
    uv add <tool-name>
    ```

    <Tip>
    UV is our preferred package manager as it's significantly faster than pip and provides better dependency resolution.
    </Tip>
</Step>

<Step title="Customize Your Project">
    Your project will contain these essential files:
docs/memory.md (new file, 45 lines)
@@ -0,0 +1,45 @@
# Memory in CrewAI

CrewAI provides a robust memory system that allows agents to retain and recall information from previous interactions.

## Configuring Embedding Providers

CrewAI supports multiple embedding providers for memory functionality:

- OpenAI (default) - Requires `OPENAI_API_KEY`
- Ollama - Requires `CREWAI_OLLAMA_URL` (defaults to "http://localhost:11434/api/embeddings")

### Environment Variables

Configure the embedding provider using these environment variables:

- `CREWAI_EMBEDDING_PROVIDER`: Provider name (default: "openai")
- `CREWAI_EMBEDDING_MODEL`: Model name (default: "text-embedding-3-small")
- `CREWAI_OLLAMA_URL`: URL for the Ollama API (when using the Ollama provider)

### Example Configuration

```python
import os

# Using OpenAI (default)
os.environ["OPENAI_API_KEY"] = "your-api-key"

# Using Ollama
os.environ["CREWAI_EMBEDDING_PROVIDER"] = "ollama"
os.environ["CREWAI_EMBEDDING_MODEL"] = "llama2"  # or any other model supported by your Ollama instance
os.environ["CREWAI_OLLAMA_URL"] = "http://localhost:11434/api/embeddings"  # optional, this is the default
```

## Memory Usage

When an agent has memory enabled, it can access and store information from previous interactions:

```python
agent = Agent(
    role="Researcher",
    goal="Research AI topics",
    backstory="You're an AI researcher",
    memory=True  # Enable memory for this agent
)
```

The memory system uses embeddings to store and retrieve relevant information, allowing agents to maintain context across multiple interactions and tasks.
@@ -91,6 +91,7 @@
        "how-to/custom-manager-agent",
        "how-to/llm-connections",
        "how-to/customizing-agents",
        "how-to/multimodal-agents",
        "how-to/coding-agents",
        "how-to/force-tool-output-as-result",
        "how-to/human-input-on-execution",
@@ -100,7 +101,9 @@
        "how-to/conditional-tasks",
        "how-to/agentops-observability",
        "how-to/langtrace-observability",
        "how-to/mlflow-observability",
        "how-to/openlit-observability",
        "how-to/portkey-observability"
      ]
    },
    {
@@ -58,7 +58,7 @@ Follow the steps below to get crewing! 🚣♂️
      description: >
        Conduct a thorough research about {topic}
        Make sure you find any interesting and relevant information given
        the current year is 2025.
      expected_output: >
        A list with 10 bullet points of the most relevant information about {topic}
      agent: researcher
@@ -195,10 +195,10 @@ Follow the steps below to get crewing! 🚣♂️
<CodeGroup>
```markdown output/report.md
# Comprehensive Report on the Rise and Impact of AI Agents in 2025

## 1. Introduction to AI Agents
In 2025, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce.

## 2. Benefits of AI Agents
AI agents bring numerous advantages that are transforming traditional work environments. Key benefits include:
|
To stay competitive and harness the full potential of AI agents, organizations must remain vigilant about the latest developments in AI technology and consider continuous learning and adaptation in their strategic planning.

## 8. Conclusion
The emergence of AI agents is undeniably reshaping the workplace landscape in 2025. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment.
```
</CodeGroup>
</Step>
@@ -278,7 +278,7 @@ email_summarizer:
     Summarize emails into a concise and clear summary
   backstory: >
     You will create a 5 bullet point summary of the report
-  llm: mixtal_llm
+  llm: openai/gpt-4o
 ```

 <Tip>
@@ -301,38 +301,166 @@ Use the annotations to properly reference the agent and task in the `crew.py` fi

 ### Annotations include:

-* `@agent`
-* `@task`
-* `@crew`
-* `@tool`
-* `@before_kickoff`
-* `@after_kickoff`
-* `@callback`
-* `@output_json`
-* `@output_pydantic`
-* `@cache_handler`
-
-```python crew.py
-# ...
-@agent
-def email_summarizer(self) -> Agent:
-    return Agent(
-        config=self.agents_config["email_summarizer"],
-    )
-
-@task
-def email_summarizer_task(self) -> Task:
-    return Task(
-        config=self.tasks_config["email_summarizer_task"],
-    )
-# ...
-```
-
-<Tip>
-In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process),
-which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
-You can learn more about the core concepts [here](/concepts).
-</Tip>
+Here are examples of how to use each annotation in your CrewAI project, and when you should use them:
+
+#### @agent
+Used to define an agent in your crew. Use this when:
+- You need to create a specialized AI agent with a specific role
+- You want the agent to be automatically collected and managed by the crew
+- You need to reuse the same agent configuration across multiple tasks
+
+```python
+@agent
+def research_agent(self) -> Agent:
+    return Agent(
+        role="Research Analyst",
+        goal="Conduct thorough research on given topics",
+        backstory="Expert researcher with years of experience in data analysis",
+        tools=[SerperDevTool()],
+        verbose=True
+    )
+```
+
+#### @task
+Used to define a task that can be executed by agents. Use this when:
+- You need to define a specific piece of work for an agent
+- You want tasks to be automatically sequenced and managed
+- You need to establish dependencies between different tasks
+
+```python
+@task
+def research_task(self) -> Task:
+    return Task(
+        description="Research the latest developments in AI technology",
+        expected_output="A comprehensive report on AI advancements",
+        agent=self.research_agent(),
+        output_file="output/research.md"
+    )
+```
+
+#### @crew
+Used to define your crew configuration. Use this when:
+- You want to automatically collect all @agent and @task definitions
+- You need to specify how tasks should be processed (sequential or hierarchical)
+- You want to set up crew-wide configurations
+
+```python
+@crew
+def research_crew(self) -> Crew:
+    return Crew(
+        agents=self.agents,  # Automatically collected from @agent methods
+        tasks=self.tasks,    # Automatically collected from @task methods
+        process=Process.sequential,
+        verbose=True
+    )
+```
+
+#### @tool
+Used to create custom tools for your agents. Use this when:
+- You need to give agents specific capabilities (like web search, data analysis)
+- You want to encapsulate external API calls or complex operations
+- You need to share functionality across multiple agents
+
+```python
+@tool
+def web_search_tool(query: str, max_results: int = 5) -> list[str]:
+    """
+    Search the web for information.
+
+    Args:
+        query: The search query
+        max_results: Maximum number of results to return
+
+    Returns:
+        List of search results
+    """
+    # Implement your search logic here
+    return [f"Result {i} for: {query}" for i in range(max_results)]
+```
+
+#### @before_kickoff
+Used to execute logic before the crew starts. Use this when:
+- You need to validate or preprocess input data
+- You want to set up resources or configurations before execution
+- You need to perform any initialization logic
+
+```python
+@before_kickoff
+def validate_inputs(self, inputs: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+    """Validate and preprocess inputs before the crew starts."""
+    if inputs is None:
+        return None
+
+    if 'topic' not in inputs:
+        raise ValueError("Topic is required")
+
+    # Add additional context
+    inputs['timestamp'] = datetime.now().isoformat()
+    inputs['topic'] = inputs['topic'].strip().lower()
+    return inputs
+```
+
+#### @after_kickoff
+Used to process results after the crew completes. Use this when:
+- You need to format or transform the final output
+- You want to perform cleanup operations
+- You need to save or log the results in a specific way
+
+```python
+@after_kickoff
+def process_results(self, result: CrewOutput) -> CrewOutput:
+    """Process and format the results after the crew completes."""
+    result.raw = result.raw.strip()
+    result.raw = f"""
+    # Research Results
+    Generated on: {datetime.now().isoformat()}
+
+    {result.raw}
+    """
+    return result
+```
+
+#### @callback
+Used to handle events during crew execution. Use this when:
+- You need to monitor task progress
+- You want to log intermediate results
+- You need to implement custom progress tracking or metrics
+
+```python
+@callback
+def log_task_completion(self, task: Task, output: str):
+    """Log task completion details for monitoring."""
+    print(f"Task '{task.description}' completed")
+    print(f"Output length: {len(output)} characters")
+    print(f"Agent used: {task.agent.role}")
+    print("-" * 50)
+```
+
+#### @cache_handler
+Used to implement custom caching for task results. Use this when:
+- You want to avoid redundant expensive operations
+- You need to implement custom cache storage or expiration logic
+- You want to persist results between runs
+
+```python
+@cache_handler
+def custom_cache(self, key: str) -> Optional[str]:
+    """Custom cache implementation for storing task results."""
+    cache_file = f"cache/{key}.json"
+
+    if os.path.exists(cache_file):
+        with open(cache_file, 'r') as f:
+            data = json.load(f)
+            # Check if cache is still valid (e.g., not expired)
+            if datetime.fromisoformat(data['timestamp']) > datetime.now() - timedelta(days=1):
+                return data['result']
+    return None
+```
+
+<Note>
+These decorators are part of the CrewAI framework and help organize your crew's structure by automatically collecting agents, tasks, and handling various lifecycle events.
+They should be used within a class decorated with `@CrewBase`.
+</Note>

 ### Replay Tasks from Latest Crew Kickoff

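The new guide above documents each decorator in isolation. As a complement, here is a minimal sketch of how they compose inside a single `@CrewBase` class. It assumes the decorators are importable from `crewai.project`, that `@CrewBase` populates `self.agents`/`self.tasks` as described in the `<Note>`, and that the code runs inside a standard crewAI project scaffold; the class and input names are illustrative.

```python
from typing import Any, Dict, Optional

from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, before_kickoff, crew, task


@CrewBase
class ResearchCrew:
    """Illustrative crew combining the decorators documented above."""

    @before_kickoff
    def validate_inputs(self, inputs: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        # Fail fast when the required input is missing (mirrors the docs example).
        if inputs is not None and "topic" not in inputs:
            raise ValueError("Topic is required")
        return inputs

    @agent
    def research_agent(self) -> Agent:
        return Agent(
            role="Research Analyst",
            goal="Conduct thorough research on given topics",
            backstory="Expert researcher with years of experience in data analysis",
        )

    @task
    def research_task(self) -> Task:
        return Task(
            description="Research the latest developments in AI technology",
            expected_output="A comprehensive report on AI advancements",
            agent=self.research_agent(),
        )

    @crew
    def research_crew(self) -> Crew:
        # @CrewBase collects the @agent and @task methods into these lists.
        return Crew(agents=self.agents, tasks=self.tasks, process=Process.sequential)


# The @crew method returns an ordinary Crew, so kickoff works as usual:
result = ResearchCrew().research_crew().kickoff(inputs={"topic": "AI agents"})
print(result.raw)
```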
@@ -1,78 +1,118 @@
 ---
 title: Composio Tool
-description: The `ComposioTool` is a wrapper around the composio set of tools and gives your agent access to a wide variety of tools from the Composio SDK.
+description: Composio provides 250+ production-ready tools for AI agents with flexible authentication management.
 icon: gear-code
 ---

-# `ComposioTool`
+# `ComposioToolSet`

 ## Description
+Composio is an integration platform that allows you to connect your AI agents to 250+ tools. Key features include:

-This tools is a wrapper around the composio set of tools and gives your agent access to a wide variety of tools from the Composio SDK.
+- **Enterprise-Grade Authentication**: Built-in support for OAuth, API Keys, JWT with automatic token refresh
+- **Full Observability**: Detailed tool usage logs, execution timestamps, and more

 ## Installation

-To incorporate this tool into your project, follow the installation instructions below:
+To incorporate Composio tools into your project, follow the instructions below:

 ```shell
-pip install composio-core
-pip install 'crewai[tools]'
+pip install composio-crewai
+pip install crewai
 ```

-after the installation is complete, either run `composio login` or export your composio API key as `COMPOSIO_API_KEY`.
+After the installation is complete, either run `composio login` or export your composio API key as `COMPOSIO_API_KEY`. Get your Composio API key from [here](https://app.composio.dev)

 ## Example

 The following example demonstrates how to initialize the tool and execute a github action:

-1. Initialize Composio tools
+1. Initialize Composio toolset

 ```python Code
-from composio import App
-from crewai_tools import ComposioTool
-from crewai import Agent, Task
+from composio_crewai import ComposioToolSet, App, Action
+from crewai import Agent, Task, Crew

-tools = [ComposioTool.from_action(action=Action.GITHUB_ACTIVITY_STAR_REPO_FOR_AUTHENTICATED_USER)]
+toolset = ComposioToolSet()
 ```

-If you don't know what action you want to use, use `from_app` and `tags` filter to get relevant actions
+2. Connect your GitHub account
+<CodeGroup>
+```shell CLI
+composio add github
+```
 ```python Code
-tools = ComposioTool.from_app(App.GITHUB, tags=["important"])
+request = toolset.initiate_connection(app=App.GITHUB)
+print(f"Open this URL to authenticate: {request.redirectUrl}")
 ```
+</CodeGroup>

-or use `use_case` to search relevant actions
+3. Get Tools

+- Retrieving all the tools from an app (not recommended for production):
 ```python Code
-tools = ComposioTool.from_app(App.GITHUB, use_case="Star a github repository")
+tools = toolset.get_tools(apps=[App.GITHUB])
 ```

-2. Define agent
+- Filtering tools based on tags:
+```python Code
+tag = "users"
+
+filtered_action_enums = toolset.find_actions_by_tags(
+    App.GITHUB,
+    tags=[tag],
+)
+
+tools = toolset.get_tools(actions=filtered_action_enums)
+```
+
+- Filtering tools based on use case:
+```python Code
+use_case = "Star a repository on GitHub"
+
+filtered_action_enums = toolset.find_actions_by_use_case(
+    App.GITHUB, use_case=use_case, advanced=False
+)
+
+tools = toolset.get_tools(actions=filtered_action_enums)
+```
+<Tip>Set `advanced` to True to get actions for complex use cases</Tip>
+
+- Using specific tools:
+
+In this demo, we will use the `GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER` action from the GitHub app.
+```python Code
+tools = toolset.get_tools(
+    actions=[Action.GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER]
+)
+```
+Learn more about filtering actions [here](https://docs.composio.dev/patterns/tools/use-tools/use-specific-actions)
+
+4. Define agent

 ```python Code
 crewai_agent = Agent(
-    role="Github Agent",
-    goal="You take action on Github using Github APIs",
-    backstory=(
-        "You are AI agent that is responsible for taking actions on Github "
-        "on users behalf. You need to take action on Github using Github APIs"
-    ),
+    role="GitHub Agent",
+    goal="You take action on GitHub using GitHub APIs",
+    backstory="You are AI agent that is responsible for taking actions on GitHub on behalf of users using GitHub APIs",
     verbose=True,
     tools=tools,
+    llm= # pass an llm
 )
 ```

-3. Execute task
+5. Execute task

 ```python Code
 task = Task(
-    description="Star a repo ComposioHQ/composio on GitHub",
+    description="Star a repo composiohq/composio on GitHub",
     agent=crewai_agent,
-    expected_output="if the star happened",
+    expected_output="Status of the operation",
 )

-task.execute()
+crew = Crew(agents=[crewai_agent], tasks=[task])
+
+crew.kickoff()
 ```

 * More detailed list of tools can be found [here](https://app.composio.dev)
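For convenience, the numbered snippets in the updated page collapse into one script. This sketch uses only the `ComposioToolSet` calls shown above and assumes `composio login` (or `COMPOSIO_API_KEY`) and `composio add github` have already been run, plus a configured LLM for the agent itself.

```python
from composio_crewai import Action, App, ComposioToolSet
from crewai import Agent, Crew, Task

# Requires COMPOSIO_API_KEY (or `composio login`) and a connected GitHub
# account (`composio add github`), as described in steps 1-2 above.
toolset = ComposioToolSet()
tools = toolset.get_tools(
    actions=[Action.GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER]
)

crewai_agent = Agent(
    role="GitHub Agent",
    goal="You take action on GitHub using GitHub APIs",
    backstory="You are an AI agent that takes actions on GitHub on behalf of users",
    verbose=True,
    tools=tools,
)

task = Task(
    description="Star a repo composiohq/composio on GitHub",
    agent=crewai_agent,
    expected_output="Status of the operation",
)

crew = Crew(agents=[crewai_agent], tasks=[task])
result = crew.kickoff()
print(result.raw)
```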
@@ -8,9 +8,9 @@ icon: file-pen
 ## Description

-The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files.
+The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files with cross-platform compatibility (Windows, Linux, macOS).
 It is particularly useful in scenarios such as generating reports, saving logs, creating configuration files, and more.
-This tool supports creating new directories if they don't exist, making it easier to organize your output.
+This tool handles path differences across operating systems, supports UTF-8 encoding, and automatically creates directories if they don't exist, making it easier to organize your output reliably across different platforms.

 ## Installation

@@ -43,6 +43,8 @@ print(result)

 ## Conclusion

-By integrating the `FileWriterTool` into your crews, the agents can execute the process of writing content to files and creating directories.
-This tool is essential for tasks that require saving output data, creating structured file systems, and more. By adhering to the setup and usage guidelines provided,
-incorporating this tool into projects is straightforward and efficient.
+By integrating the `FileWriterTool` into your crews, the agents can reliably write content to files across different operating systems.
+This tool is essential for tasks that require saving output data, creating structured file systems, and handling cross-platform file operations.
+It's particularly recommended for Windows users who may encounter file writing issues with standard Python file operations.
+
+By adhering to the setup and usage guidelines provided, incorporating this tool into projects is straightforward and ensures consistent file writing behavior across all platforms.
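The second hunk's `@@ -43` context shows a `print(result)` from the page's usage example, which is not included in this diff. A sketch of the surrounding call, with the argument names (`filename`, `content`, `directory`, `overwrite`) taken from the tool's existing example section and therefore to be treated as assumptions:

```python
from crewai_tools import FileWriterTool

# Initialize the tool
file_writer_tool = FileWriterTool()

# Write content to a file; intermediate directories are created if missing
# (argument names assumed from the tool's existing usage example).
result = file_writer_tool._run(
    filename="ai.txt",
    content="AI is a fascinating field.",
    directory="data",
    overwrite="True",
)
print(result)
```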
@@ -152,6 +152,7 @@ nav:
     - Agent Monitoring with AgentOps: 'how-to/AgentOps-Observability.md'
     - Agent Monitoring with LangTrace: 'how-to/Langtrace-Observability.md'
     - Agent Monitoring with OpenLIT: 'how-to/openlit-Observability.md'
+    - Agent Monitoring with MLflow: 'how-to/mlflow-Observability.md'
 - Tools Docs:
     - Browserbase Web Loader: 'tools/BrowserbaseLoadTool.md'
     - Code Docs RAG Search: 'tools/CodeDocsSearchTool.md'
@@ -1,6 +1,6 @@
 [project]
 name = "crewai"
-version = "0.86.0"
+version = "0.100.1"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 readme = "README.md"
 requires-python = ">=3.10,<3.13"
@@ -11,27 +11,22 @@ dependencies = [
     # Core Dependencies
     "pydantic>=2.4.2",
     "openai>=1.13.3",
-    "litellm>=1.56.4",
+    "litellm==1.60.2",
     "instructor>=1.3.3",

     # Text Processing
     "pdfplumber>=0.11.4",
     "regex>=2024.9.11",

     # Telemetry and Monitoring
     "opentelemetry-api>=1.22.0",
     "opentelemetry-sdk>=1.22.0",
     "opentelemetry-exporter-otlp-proto-http>=1.22.0",

     # Data Handling
     "chromadb>=0.5.23",
     "openpyxl>=3.1.5",
     "pyvis>=0.3.2",

     # Authentication and Security
     "auth0-python>=4.7.1",
     "python-dotenv>=1.0.0",

     # Configuration and Utils
     "click>=8.1.7",
     "appdirs>=1.4.4",
@@ -41,6 +36,7 @@ dependencies = [
     "tomli-w>=1.1.0",
     "tomli>=2.0.2",
     "blinker>=1.9.0",
+    "json5>=0.10.0",
 ]

 [project.urls]
@@ -49,7 +45,7 @@ Documentation = "https://docs.crewai.com"
 Repository = "https://github.com/crewAIInc/crewAI"

 [project.optional-dependencies]
-tools = ["crewai-tools>=0.17.0"]
+tools = ["crewai-tools>=0.32.1"]
 embeddings = [
     "tiktoken~=0.7.0"
 ]
@@ -14,7 +14,7 @@ warnings.filterwarnings(
     category=UserWarning,
     module="pydantic.main",
 )
-__version__ = "0.86.0"
+__version__ = "0.100.1"
 __all__ = [
     "Agent",
     "Crew",
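Both the package metadata above and this module constant move to 0.100.1 in lockstep, so a quick runtime check after upgrading should agree with `pip show crewai`:

```python
import crewai

# After upgrading, the module constant matches the pyproject.toml version bump.
print(crewai.__version__)  # expected: 0.100.1
```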
@@ -243,6 +243,15 @@ class Agent(BaseAgent):
         if isinstance(self.knowledge_sources, list) and all(
             isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
         ):
+            # Validate embedding configuration based on provider
+            from crewai.utilities.constants import DEFAULT_EMBEDDING_PROVIDER
+            provider = os.getenv("CREWAI_EMBEDDING_PROVIDER", DEFAULT_EMBEDDING_PROVIDER)
+
+            if provider == "openai" and not os.getenv("OPENAI_API_KEY"):
+                raise ValueError("Please provide an OpenAI API key via OPENAI_API_KEY environment variable")
+            elif provider == "ollama" and not os.getenv("CREWAI_OLLAMA_URL", "http://localhost:11434/api/embeddings"):
+                raise ValueError("Please provide Ollama URL via CREWAI_OLLAMA_URL environment variable")
+
             self._knowledge = Knowledge(
                 sources=self.knowledge_sources,
                 embedder_config=self.embedder_config,
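The net effect of the validation above: when knowledge sources are configured, a missing credential for the selected embedding provider now fails fast with a clear `ValueError` instead of surfacing later at query time. A sketch of the environment contract the new code reads (the default provider value lives in `DEFAULT_EMBEDDING_PROVIDER`, which is outside this hunk and assumed here to be `"openai"`):

```python
import os

# Provider selection read by the validation above.
os.environ["CREWAI_EMBEDDING_PROVIDER"] = "openai"
os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder; required when the provider is "openai"

# Or, for local embeddings via Ollama:
os.environ["CREWAI_EMBEDDING_PROVIDER"] = "ollama"
os.environ["CREWAI_OLLAMA_URL"] = "http://localhost:11434/api/embeddings"
```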
@@ -18,10 +18,13 @@ from pydantic_core import PydanticCustomError
 from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
 from crewai.agents.cache.cache_handler import CacheHandler
 from crewai.agents.tools_handler import ToolsHandler
+from crewai.knowledge.knowledge import Knowledge
+from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
 from crewai.tools import BaseTool
 from crewai.tools.base_tool import Tool
 from crewai.utilities import I18N, Logger, RPMController
 from crewai.utilities.config import process_config
+from crewai.utilities.converter import Converter

 T = TypeVar("T", bound="BaseAgent")

@@ -40,7 +43,7 @@ class BaseAgent(ABC, BaseModel):
         max_rpm (Optional[int]): Maximum number of requests per minute for the agent execution.
         allow_delegation (bool): Allow delegation of tasks to agents.
         tools (Optional[List[Any]]): Tools at the agent's disposal.
-        max_iter (Optional[int]): Maximum iterations for an agent to execute a task.
+        max_iter (int): Maximum iterations for an agent to execute a task.
         agent_executor (InstanceOf): An instance of the CrewAgentExecutor class.
         llm (Any): Language model that will run the agent.
         crew (Any): Crew to which the agent belongs.
@@ -48,6 +51,8 @@ class BaseAgent(ABC, BaseModel):
         cache_handler (InstanceOf[CacheHandler]): An instance of the CacheHandler class.
         tools_handler (InstanceOf[ToolsHandler]): An instance of the ToolsHandler class.
         max_tokens: Maximum number of tokens for the agent to generate in a response.
+        knowledge_sources: Knowledge sources for the agent.
+        knowledge_storage: Custom knowledge storage for the agent.

     Methods:
@@ -108,9 +113,9 @@ class BaseAgent(ABC, BaseModel):
         description="Enable agent to delegate and ask questions among each other.",
     )
     tools: Optional[List[Any]] = Field(
-        default_factory=list, description="Tools at agents' disposal"
+        default_factory=lambda: [], description="Tools at agents' disposal"
     )
-    max_iter: Optional[int] = Field(
+    max_iter: int = Field(
         default=25, description="Maximum iterations for an agent to execute a task"
     )
     agent_executor: InstanceOf = Field(
@@ -121,15 +126,27 @@ class BaseAgent(ABC, BaseModel):
     )
     crew: Any = Field(default=None, description="Crew to which the agent belongs.")
     i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
-    cache_handler: InstanceOf[CacheHandler] = Field(
+    cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
         default=None, description="An instance of the CacheHandler class."
     )
     tools_handler: InstanceOf[ToolsHandler] = Field(
-        default=None, description="An instance of the ToolsHandler class."
+        default_factory=ToolsHandler,
+        description="An instance of the ToolsHandler class.",
     )
     max_tokens: Optional[int] = Field(
         default=None, description="Maximum number of tokens for the agent's execution."
     )
+    knowledge: Optional[Knowledge] = Field(
+        default=None, description="Knowledge for the agent."
+    )
+    knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
+        default=None,
+        description="Knowledge sources for the agent.",
+    )
+    knowledge_storage: Optional[Any] = Field(
+        default=None,
+        description="Custom knowledge storage for the agent.",
+    )

     @model_validator(mode="before")
     @classmethod
@@ -239,7 +256,7 @@ class BaseAgent(ABC, BaseModel):
     @abstractmethod
     def get_output_converter(
         self, llm: Any, text: str, model: type[BaseModel] | None, instructions: str
-    ):
+    ) -> Converter:
         """Get the converter class for the agent to create json/pydantic outputs."""
         pass

@@ -256,13 +273,44 @@ class BaseAgent(ABC, BaseModel):
             "tools_handler",
             "cache_handler",
             "llm",
+            "knowledge_sources",
+            "knowledge_storage",
+            "knowledge",
         }

-        # Copy llm and clear callbacks
+        # Copy llm
         existing_llm = shallow_copy(self.llm)
+        copied_knowledge = shallow_copy(self.knowledge)
+        copied_knowledge_storage = shallow_copy(self.knowledge_storage)
+        # Properly copy knowledge sources if they exist
+        existing_knowledge_sources = None
+        if self.knowledge_sources:
+            # Create a shared storage instance for all knowledge sources
+            shared_storage = (
+                self.knowledge_sources[0].storage if self.knowledge_sources else None
+            )
+
+            existing_knowledge_sources = []
+            for source in self.knowledge_sources:
+                copied_source = (
+                    source.model_copy()
+                    if hasattr(source, "model_copy")
+                    else shallow_copy(source)
+                )
+                # Ensure all copied sources use the same storage instance
+                copied_source.storage = shared_storage
+                existing_knowledge_sources.append(copied_source)
+
         copied_data = self.model_dump(exclude=exclude)
         copied_data = {k: v for k, v in copied_data.items() if v is not None}
-        copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools)
+        copied_agent = type(self)(
+            **copied_data,
+            llm=existing_llm,
+            tools=self.tools,
+            knowledge_sources=existing_knowledge_sources,
+            knowledge=copied_knowledge,
+            knowledge_storage=copied_knowledge_storage,
+        )

         return copied_agent

@@ -19,15 +19,10 @@ class CrewAgentExecutorMixin:
     agent: Optional["BaseAgent"]
     task: Optional["Task"]
     iterations: int
-    have_forced_answer: bool
     max_iter: int
     _i18n: I18N
     _printer: Printer = Printer()

-    def _should_force_answer(self) -> bool:
-        """Determine if a forced answer is required based on iteration count."""
-        return (self.iterations >= self.max_iter) and not self.have_forced_answer
-
     def _create_short_term_memory(self, output) -> None:
         """Create and save a short-term memory item if conditions are met."""
         if (
@@ -100,18 +95,29 @@ class CrewAgentExecutorMixin:
         pass

     def _ask_human_input(self, final_answer: str) -> str:
-        """Prompt human input for final decision making."""
+        """Prompt human input with mode-appropriate messaging."""
         self._printer.print(
             content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
         )

-        self._printer.print(
-            content=(
+        # Training mode prompt (single iteration)
+        if self.crew and getattr(self.crew, "_train", False):
+            prompt = (
                 "\n\n=====\n"
-                "## Please provide feedback on the Final Result and the Agent's actions. "
-                "Respond with 'looks good' or a similar phrase when you're satisfied.\n"
+                "## TRAINING MODE: Provide feedback to improve the agent's performance.\n"
+                "This will be used to train better versions of the agent.\n"
+                "Please provide detailed feedback about the result quality and reasoning process.\n"
                 "=====\n"
-            ),
-            color="bold_yellow",
-        )
+            )
+        # Regular human-in-the-loop prompt (multiple iterations)
+        else:
+            prompt = (
+                "\n\n=====\n"
+                "## HUMAN FEEDBACK: Provide feedback on the Final Result and Agent's actions.\n"
+                "Respond with 'looks good' to accept or provide specific improvement requests.\n"
+                "You can provide multiple rounds of feedback until satisfied.\n"
+                "=====\n"
+            )
+
+        self._printer.print(content=prompt, color="bold_yellow")
         return input()
@@ -25,7 +25,7 @@ class OutputConverter(BaseModel, ABC):
     llm: Any = Field(description="The language model to be used to convert the text.")
     model: Any = Field(description="The model to be used to convert the text.")
     instructions: str = Field(description="Conversion instructions to the LLM.")
-    max_attempts: Optional[int] = Field(
+    max_attempts: int = Field(
        description="Max number of attempts to try to get the output formatted.",
        default=3,
    )
@@ -2,25 +2,26 @@ from crewai.types.usage_metrics import UsageMetrics


 class TokenProcess:
-    total_tokens: int = 0
-    prompt_tokens: int = 0
-    cached_prompt_tokens: int = 0
-    completion_tokens: int = 0
-    successful_requests: int = 0
+    def __init__(self) -> None:
+        self.total_tokens: int = 0
+        self.prompt_tokens: int = 0
+        self.cached_prompt_tokens: int = 0
+        self.completion_tokens: int = 0
+        self.successful_requests: int = 0

-    def sum_prompt_tokens(self, tokens: int):
-        self.prompt_tokens = self.prompt_tokens + tokens
-        self.total_tokens = self.total_tokens + tokens
+    def sum_prompt_tokens(self, tokens: int) -> None:
+        self.prompt_tokens += tokens
+        self.total_tokens += tokens

-    def sum_completion_tokens(self, tokens: int):
-        self.completion_tokens = self.completion_tokens + tokens
-        self.total_tokens = self.total_tokens + tokens
+    def sum_completion_tokens(self, tokens: int) -> None:
+        self.completion_tokens += tokens
+        self.total_tokens += tokens

-    def sum_cached_prompt_tokens(self, tokens: int):
-        self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
+    def sum_cached_prompt_tokens(self, tokens: int) -> None:
+        self.cached_prompt_tokens += tokens

-    def sum_successful_requests(self, requests: int):
-        self.successful_requests = self.successful_requests + requests
+    def sum_successful_requests(self, requests: int) -> None:
+        self.successful_requests += requests

     def get_summary(self) -> UsageMetrics:
         return UsageMetrics(
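With the counters initialized in `__init__`, every `TokenProcess` now carries unambiguous per-instance totals. A quick sketch of the accounting API as refactored above (the import path comes from the base_agent.py hunk earlier in this diff):

```python
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess

usage = TokenProcess()
usage.sum_prompt_tokens(120)
usage.sum_completion_tokens(80)
usage.sum_cached_prompt_tokens(16)
usage.sum_successful_requests(1)

summary = usage.get_summary()  # returns a UsageMetrics model
print(summary)  # total_tokens=200, prompt_tokens=120, completion_tokens=80, ...

# A second tracker starts from zero: state is per instance.
assert TokenProcess().total_tokens == 0
```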
@@ -1,7 +1,7 @@
|
|||||||
import json
|
import json
|
||||||
import re
|
import re
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from typing import Any, Dict, List, Union
|
from typing import Any, Callable, Dict, List, Optional, Union
|
||||||
|
|
||||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||||
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
|
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
|
||||||
@@ -13,6 +13,7 @@ from crewai.agents.parser import (
|
|||||||
OutputParserException,
|
OutputParserException,
|
||||||
)
|
)
|
||||||
from crewai.agents.tools_handler import ToolsHandler
|
from crewai.agents.tools_handler import ToolsHandler
|
||||||
|
from crewai.llm import LLM
|
||||||
from crewai.tools.base_tool import BaseTool
|
from crewai.tools.base_tool import BaseTool
|
||||||
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
|
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
|
||||||
from crewai.utilities import I18N, Printer
|
from crewai.utilities import I18N, Printer
|
||||||
@@ -50,11 +51,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
original_tools: List[Any] = [],
|
original_tools: List[Any] = [],
|
||||||
function_calling_llm: Any = None,
|
function_calling_llm: Any = None,
|
||||||
respect_context_window: bool = False,
|
respect_context_window: bool = False,
|
||||||
request_within_rpm_limit: Any = None,
|
request_within_rpm_limit: Optional[Callable[[], bool]] = None,
|
||||||
callbacks: List[Any] = [],
|
callbacks: List[Any] = [],
|
||||||
):
|
):
|
||||||
self._i18n: I18N = I18N()
|
self._i18n: I18N = I18N()
|
||||||
self.llm = llm
|
self.llm: LLM = llm
|
||||||
self.task = task
|
self.task = task
|
||||||
self.agent = agent
|
self.agent = agent
|
||||||
self.crew = crew
|
self.crew = crew
|
||||||
@@ -77,14 +78,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
self.messages: List[Dict[str, str]] = []
|
self.messages: List[Dict[str, str]] = []
|
||||||
self.iterations = 0
|
self.iterations = 0
|
||||||
self.log_error_after = 3
|
self.log_error_after = 3
|
||||||
self.have_forced_answer = False
|
|
||||||
self.tool_name_to_tool_map: Dict[str, BaseTool] = {
|
self.tool_name_to_tool_map: Dict[str, BaseTool] = {
|
||||||
tool.name: tool for tool in self.tools
|
tool.name: tool for tool in self.tools
|
||||||
}
|
}
|
||||||
if self.llm.stop:
|
self.stop = stop_words
|
||||||
self.llm.stop = list(set(self.llm.stop + self.stop))
|
self.llm.stop = list(set(self.llm.stop + self.stop))
|
||||||
else:
|
|
||||||
self.llm.stop = self.stop
|
|
||||||
|
|
||||||
def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
|
def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
|
||||||
if "system" in self.prompt:
|
if "system" in self.prompt:
|
||||||
@@ -99,7 +97,22 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
self._show_start_logs()
|
self._show_start_logs()
|
||||||
|
|
||||||
self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
|
self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
|
||||||
formatted_answer = self._invoke_loop()
|
|
||||||
|
try:
|
||||||
|
formatted_answer = self._invoke_loop()
|
||||||
|
except AssertionError:
|
||||||
|
self._printer.print(
|
||||||
|
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
if e.__class__.__module__.startswith("litellm"):
|
||||||
|
# Do not retry on litellm errors
|
||||||
|
raise e
|
||||||
|
else:
|
||||||
|
self._handle_unknown_error(e)
|
||||||
|
raise e
|
||||||
|
|
||||||
if self.ask_for_human_input:
|
if self.ask_for_human_input:
|
||||||
formatted_answer = self._handle_human_feedback(formatted_answer)
|
formatted_answer = self._handle_human_feedback(formatted_answer)
|
||||||
@@ -108,106 +121,178 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
self._create_long_term_memory(formatted_answer)
|
self._create_long_term_memory(formatted_answer)
|
||||||
return {"output": formatted_answer.output}
|
return {"output": formatted_answer.output}
|
||||||
|
|
||||||
def _invoke_loop(self, formatted_answer=None):
|
def _invoke_loop(self) -> AgentFinish:
|
||||||
|
"""
|
||||||
|
Main loop to invoke the agent's thought process until it reaches a conclusion
|
||||||
|
or the maximum number of iterations is reached.
|
||||||
|
"""
|
||||||
|
formatted_answer = None
|
||||||
|
while not isinstance(formatted_answer, AgentFinish):
|
||||||
|
try:
|
||||||
|
if self._has_reached_max_iterations():
|
||||||
|
formatted_answer = self._handle_max_iterations_exceeded(
|
||||||
|
formatted_answer
|
||||||
|
)
|
||||||
|
break
|
||||||
|
|
||||||
|
self._enforce_rpm_limit()
|
||||||
|
|
||||||
|
answer = self._get_llm_response()
|
||||||
|
formatted_answer = self._process_llm_response(answer)
|
||||||
|
|
||||||
|
if isinstance(formatted_answer, AgentAction):
|
||||||
|
tool_result = self._execute_tool_and_check_finality(
|
||||||
|
formatted_answer
|
||||||
|
)
|
||||||
|
formatted_answer = self._handle_agent_action(
|
||||||
|
formatted_answer, tool_result
|
||||||
|
)
|
||||||
|
|
||||||
|
self._invoke_step_callback(formatted_answer)
|
||||||
|
self._append_message(formatted_answer.text, role="assistant")
|
||||||
|
|
||||||
|
except OutputParserException as e:
|
||||||
|
formatted_answer = self._handle_output_parser_exception(e)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
if e.__class__.__module__.startswith("litellm"):
|
||||||
|
# Do not retry on litellm errors
|
||||||
|
raise e
|
||||||
|
if self._is_context_length_exceeded(e):
|
||||||
|
self._handle_context_length()
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
self._handle_unknown_error(e)
|
||||||
|
raise e
|
||||||
|
finally:
|
||||||
|
self.iterations += 1
|
||||||
|
|
||||||
|
# During the invoke loop, formatted_answer alternates between AgentAction
|
||||||
|
# (when the agent is using tools) and eventually becomes AgentFinish
|
||||||
|
# (when the agent reaches a final answer). This assertion confirms we've
|
||||||
|
# reached a final answer and helps type checking understand this transition.
|
||||||
|
assert isinstance(formatted_answer, AgentFinish)
|
||||||
|
self._show_logs(formatted_answer)
|
||||||
|
return formatted_answer
|
||||||
|
|
||||||
|
def _handle_unknown_error(self, exception: Exception) -> None:
|
||||||
|
"""Handle unknown errors by informing the user."""
|
||||||
|
self._printer.print(
|
||||||
|
content="An unknown error occurred. Please check the details below.",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
|
self._printer.print(
|
||||||
|
content=f"Error details: {exception}",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _has_reached_max_iterations(self) -> bool:
|
||||||
|
"""Check if the maximum number of iterations has been reached."""
|
||||||
|
return self.iterations >= self.max_iter
|
||||||
|
|
||||||
|
def _enforce_rpm_limit(self) -> None:
|
||||||
|
"""Enforce the requests per minute (RPM) limit if applicable."""
|
||||||
|
if self.request_within_rpm_limit:
|
||||||
|
self.request_within_rpm_limit()
|
||||||
|
|
||||||
|
def _get_llm_response(self) -> str:
|
||||||
|
"""Call the LLM and return the response, handling any invalid responses."""
|
||||||
try:
|
try:
|
||||||
while not isinstance(formatted_answer, AgentFinish):
|
answer = self.llm.call(
|
||||||
if not self.request_within_rpm_limit or self.request_within_rpm_limit():
|
self.messages,
|
||||||
answer = self.llm.call(
|
callbacks=self.callbacks,
|
||||||
self.messages,
|
)
|
||||||
callbacks=self.callbacks,
|
|
||||||
)
|
|
||||||
|
|
||||||
if answer is None or answer == "":
|
|
||||||
self._printer.print(
|
|
||||||
content="Received None or empty response from LLM call.",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
raise ValueError(
|
|
||||||
"Invalid response from LLM call - None or empty."
|
|
||||||
)
|
|
||||||
|
|
||||||
if not self.use_stop_words:
|
|
||||||
try:
|
|
||||||
self._format_answer(answer)
|
|
||||||
except OutputParserException as e:
|
|
||||||
if (
|
|
||||||
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE
|
|
||||||
in e.error
|
|
||||||
):
|
|
||||||
answer = answer.split("Observation:")[0].strip()
|
|
||||||
|
|
||||||
self.iterations += 1
|
|
||||||
formatted_answer = self._format_answer(answer)
|
|
||||||
|
|
||||||
if isinstance(formatted_answer, AgentAction):
|
|
||||||
tool_result = self._execute_tool_and_check_finality(
|
|
||||||
formatted_answer
|
|
||||||
)
|
|
||||||
|
|
||||||
# Directly append the result to the messages if the
|
|
||||||
# tool is "Add image to content" in case of multimodal
|
|
||||||
# agents
|
|
||||||
if formatted_answer.tool == self._i18n.tools("add_image")["name"]:
|
|
||||||
self.messages.append(tool_result.result)
|
|
||||||
continue
|
|
||||||
|
|
||||||
else:
|
|
||||||
if self.step_callback:
|
|
||||||
self.step_callback(tool_result)
|
|
||||||
|
|
||||||
formatted_answer.text += f"\nObservation: {tool_result.result}"
|
|
||||||
|
|
||||||
formatted_answer.result = tool_result.result
|
|
||||||
if tool_result.result_as_answer:
|
|
||||||
return AgentFinish(
|
|
||||||
thought="",
|
|
||||||
output=tool_result.result,
|
|
||||||
text=formatted_answer.text,
|
|
||||||
)
|
|
||||||
self._show_logs(formatted_answer)
|
|
||||||
|
|
||||||
if self.step_callback:
|
|
||||||
self.step_callback(formatted_answer)
|
|
||||||
|
|
||||||
if self._should_force_answer():
|
|
||||||
if self.have_forced_answer:
|
|
||||||
return AgentFinish(
|
|
||||||
thought="",
|
|
||||||
output=self._i18n.errors(
|
|
||||||
"force_final_answer_error"
|
|
||||||
).format(formatted_answer.text),
|
|
||||||
text=formatted_answer.text,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
formatted_answer.text += (
|
|
||||||
f'\n{self._i18n.errors("force_final_answer")}'
|
|
||||||
)
|
|
||||||
self.have_forced_answer = True
|
|
||||||
self.messages.append(
|
|
||||||
self._format_msg(formatted_answer.text, role="assistant")
|
|
||||||
)
|
|
||||||
|
|
||||||
except OutputParserException as e:
|
|
||||||
self.messages.append({"role": "user", "content": e.error})
|
|
||||||
if self.iterations > self.log_error_after:
|
|
||||||
self._printer.print(
|
|
||||||
content=f"Error parsing LLM output, agent will retry: {e.error}",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
return self._invoke_loop(formatted_answer)
|
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if LLMContextLengthExceededException(str(e))._is_context_limit_error(
|
self._printer.print(
|
||||||
str(e)
|
content=f"Error during LLM call: {e}",
|
||||||
):
|
color="red",
|
||||||
self._handle_context_length()
|
)
|
||||||
return self._invoke_loop(formatted_answer)
|
raise e
|
||||||
else:
|
|
||||||
raise e
|
if not answer:
|
||||||
|
self._printer.print(
|
||||||
|
content="Received None or empty response from LLM call.",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
|
raise ValueError("Invalid response from LLM call - None or empty.")
|
||||||
|
|
||||||
|
return answer
|
||||||
|
|
||||||
|
def _process_llm_response(self, answer: str) -> Union[AgentAction, AgentFinish]:
|
||||||
|
"""Process the LLM response and format it into an AgentAction or AgentFinish."""
|
||||||
|
if not self.use_stop_words:
|
||||||
|
try:
|
||||||
|
# Preliminary parsing to check for errors.
|
||||||
|
self._format_answer(answer)
|
||||||
|
except OutputParserException as e:
|
||||||
|
if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
|
||||||
|
answer = answer.split("Observation:")[0].strip()
|
||||||
|
|
||||||
|
return self._format_answer(answer)
|
||||||
|
|
||||||
|
def _handle_agent_action(
|
||||||
|
self, formatted_answer: AgentAction, tool_result: ToolResult
|
||||||
|
) -> Union[AgentAction, AgentFinish]:
|
||||||
|
"""Handle the AgentAction, execute tools, and process the results."""
|
||||||
|
add_image_tool = self._i18n.tools("add_image")
|
||||||
|
if (
|
||||||
|
isinstance(add_image_tool, dict)
|
||||||
|
and formatted_answer.tool.casefold().strip()
|
||||||
|
== add_image_tool.get("name", "").casefold().strip()
|
||||||
|
):
|
||||||
|
self.messages.append(tool_result.result)
|
||||||
|
return formatted_answer # Continue the loop
|
||||||
|
|
||||||
|
if self.step_callback:
|
||||||
|
self.step_callback(tool_result)
|
||||||
|
|
||||||
|
formatted_answer.text += f"\nObservation: {tool_result.result}"
|
||||||
|
formatted_answer.result = tool_result.result
|
||||||
|
|
||||||
|
if tool_result.result_as_answer:
|
||||||
|
return AgentFinish(
|
||||||
|
thought="",
|
||||||
|
output=tool_result.result,
|
||||||
|
text=formatted_answer.text,
|
||||||
|
)
|
||||||
|
|
||||||
self._show_logs(formatted_answer)
|
self._show_logs(formatted_answer)
|
||||||
return formatted_answer
|
return formatted_answer
|
||||||
|
|
||||||
|
def _invoke_step_callback(self, formatted_answer) -> None:
|
||||||
|
"""Invoke the step callback if it exists."""
|
||||||
|
if self.step_callback:
|
||||||
|
self.step_callback(formatted_answer)
|
||||||
|
|
||||||
|
def _append_message(self, text: str, role: str = "assistant") -> None:
|
||||||
|
"""Append a message to the message list with the given role."""
|
||||||
|
self.messages.append(self._format_msg(text, role=role))
|
||||||
|
|
||||||
|
def _handle_output_parser_exception(self, e: OutputParserException) -> AgentAction:
|
||||||
|
"""Handle OutputParserException by updating messages and formatted_answer."""
|
||||||
|
self.messages.append({"role": "user", "content": e.error})
|
||||||
|
|
||||||
|
formatted_answer = AgentAction(
|
||||||
|
text=e.error,
|
||||||
|
tool="",
|
||||||
|
tool_input="",
|
||||||
|
thought="",
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.iterations > self.log_error_after:
|
||||||
|
self._printer.print(
|
||||||
|
content=f"Error parsing LLM output, agent will retry: {e.error}",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
|
|
||||||
|
return formatted_answer
|
||||||
|
|
||||||
|
def _is_context_length_exceeded(self, exception: Exception) -> bool:
|
||||||
|
"""Check if the exception is due to context length exceeding."""
|
||||||
|
return LLMContextLengthExceededException(
|
||||||
|
str(exception)
|
||||||
|
)._is_context_limit_error(str(exception))
|
||||||
|
|
||||||
def _show_start_logs(self):
|
def _show_start_logs(self):
|
||||||
if self.agent is None:
|
if self.agent is None:
|
||||||
raise ValueError("Agent cannot be None")
|
raise ValueError("Agent cannot be None")
|
||||||
@@ -218,8 +303,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
self._printer.print(
|
self._printer.print(
|
||||||
content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
|
content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
|
||||||
)
|
)
|
||||||
|
description = (
|
||||||
|
getattr(self.task, "description") if self.task else "Not Found"
|
||||||
|
)
|
||||||
self._printer.print(
|
self._printer.print(
|
||||||
content=f"\033[95m## Task:\033[00m \033[92m{self.task.description}\033[00m"
|
content=f"\033[95m## Task:\033[00m \033[92m{description}\033[00m"
|
||||||
)
|
)
|
||||||
|
|
||||||
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
|
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
|
||||||
@@ -272,7 +360,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
agent=self.agent,
|
agent=self.agent,
|
||||||
action=agent_action,
|
action=agent_action,
|
||||||
)
|
)
|
||||||
tool_calling = tool_usage.parse(agent_action.text)
|
tool_calling = tool_usage.parse_tool_calling(agent_action.text)
|
||||||
|
|
||||||
if isinstance(tool_calling, ToolUsageErrorException):
|
if isinstance(tool_calling, ToolUsageErrorException):
|
||||||
tool_result = tool_calling.message
|
tool_result = tool_calling.message
|
||||||
@@ -344,58 +432,50 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def _handle_crew_training_output(
|
def _handle_crew_training_output(
|
||||||
self, result: AgentFinish, human_feedback: str | None = None
|
self, result: AgentFinish, human_feedback: Optional[str] = None
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Function to handle the process of the training data."""
|
"""Handle the process of saving training data."""
|
||||||
agent_id = str(self.agent.id) # type: ignore
|
agent_id = str(self.agent.id) # type: ignore
|
||||||
|
train_iteration = (
|
||||||
|
getattr(self.crew, "_train_iteration", None) if self.crew else None
|
||||||
|
)
|
||||||
|
|
||||||
|
if train_iteration is None or not isinstance(train_iteration, int):
|
||||||
|
self._printer.print(
|
||||||
|
content="Invalid or missing train iteration. Cannot save training data.",
|
||||||
|
color="red",
|
||||||
|
)
|
||||||
|
return
|
||||||
|
|
||||||
# Load training data
|
|
||||||
training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
|
training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
|
||||||
training_data = training_handler.load()
|
training_data = training_handler.load() or {}
|
||||||
|
|
||||||
# Check if training data exists, human input is not requested, and self.crew is valid
|
# Initialize or retrieve agent's training data
|
||||||
if training_data and not self.ask_for_human_input:
|
agent_training_data = training_data.get(agent_id, {})
|
||||||
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
|
|
||||||
train_iteration = self.crew._train_iteration
|
|
||||||
if agent_id in training_data and isinstance(train_iteration, int):
|
|
||||||
training_data[agent_id][train_iteration][
|
|
||||||
"improved_output"
|
|
||||||
] = result.output
|
|
||||||
training_handler.save(training_data)
|
|
||||||
else:
|
|
||||||
self._printer.print(
|
|
||||||
content="Invalid train iteration type or agent_id not in training data.",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self._printer.print(
|
|
||||||
content="Crew is None or does not have _train_iteration attribute.",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
|
|
||||||
if self.ask_for_human_input and human_feedback is not None:
|
if human_feedback is not None:
|
||||||
training_data = {
|
# Save initial output and human feedback
|
||||||
|
agent_training_data[train_iteration] = {
|
||||||
"initial_output": result.output,
|
"initial_output": result.output,
|
||||||
"human_feedback": human_feedback,
|
"human_feedback": human_feedback,
|
||||||
"agent": agent_id,
|
|
||||||
"agent_role": self.agent.role, # type: ignore
|
|
||||||
}
|
}
|
||||||
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
|
else:
|
||||||
train_iteration = self.crew._train_iteration
|
# Save improved output
|
||||||
if isinstance(train_iteration, int):
|
if train_iteration in agent_training_data:
|
||||||
CrewTrainingHandler(TRAINING_DATA_FILE).append(
|
agent_training_data[train_iteration]["improved_output"] = result.output
|
||||||
train_iteration, agent_id, training_data
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self._printer.print(
|
|
||||||
content="Invalid train iteration type. Expected int.",
|
|
||||||
color="red",
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
self._printer.print(
|
self._printer.print(
|
||||||
content="Crew is None or does not have _train_iteration attribute.",
|
content=(
|
||||||
|
f"No existing training data for agent {agent_id} and iteration "
|
||||||
|
f"{train_iteration}. Cannot save improved output."
|
||||||
|
),
|
||||||
color="red",
|
color="red",
|
||||||
)
|
)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Update the training data and save
|
||||||
|
training_data[agent_id] = agent_training_data
|
||||||
|
training_handler.save(training_data)
|
||||||
|
|
||||||
def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
|
def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
|
||||||
prompt = prompt.replace("{input}", inputs["input"])
|
prompt = prompt.replace("{input}", inputs["input"])
|
||||||
@@ -411,79 +491,150 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         return {"role": role, "content": prompt}

     def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
-        """
-        Handles the human feedback loop, allowing the user to provide feedback
-        on the agent's output and determining if additional iterations are needed.
+        """Handle human feedback with different flows for training vs regular use.

-        Parameters:
-            formatted_answer (AgentFinish): The initial output from the agent.
+        Args:
+            formatted_answer: The initial AgentFinish result to get feedback on

         Returns:
-            AgentFinish: The final output after incorporating human feedback.
+            AgentFinish: The final answer after processing feedback
         """
+        human_feedback = self._ask_human_input(formatted_answer.output)
+
+        if self._is_training_mode():
+            return self._handle_training_feedback(formatted_answer, human_feedback)
+
+        return self._handle_regular_feedback(formatted_answer, human_feedback)
+
+    def _is_training_mode(self) -> bool:
+        """Check if crew is in training mode."""
+        return bool(self.crew and self.crew._train)
+
+    def _handle_training_feedback(
+        self, initial_answer: AgentFinish, feedback: str
+    ) -> AgentFinish:
+        """Process feedback for training scenarios with single iteration."""
+        self._printer.print(
+            content="\nProcessing training feedback.\n",
+            color="yellow",
+        )
+        self._handle_crew_training_output(initial_answer, feedback)
+        self.messages.append(
+            self._format_msg(
+                self._i18n.slice("feedback_instructions").format(feedback=feedback)
+            )
+        )
+        improved_answer = self._invoke_loop()
+        self._handle_crew_training_output(improved_answer)
+        self.ask_for_human_input = False
+        return improved_answer
+
+    def _handle_regular_feedback(
+        self, current_answer: AgentFinish, initial_feedback: str
+    ) -> AgentFinish:
+        """Process feedback for regular use with potential multiple iterations."""
+        feedback = initial_feedback
+        answer = current_answer
+
         while self.ask_for_human_input:
-            human_feedback = self._ask_human_input(formatted_answer.output)
-
-            if self.crew and self.crew._train:
-                self._handle_crew_training_output(formatted_answer, human_feedback)
-
-            # Make an LLM call to verify if additional changes are requested based on human feedback
-            additional_changes_prompt = self._i18n.slice(
-                "human_feedback_classification"
-            ).format(feedback=human_feedback)
-
-            retry_count = 0
-            llm_call_successful = False
-            additional_changes_response = None
-
-            while retry_count < MAX_LLM_RETRY and not llm_call_successful:
-                try:
-                    additional_changes_response = (
-                        self.llm.call(
-                            [
-                                self._format_msg(
-                                    additional_changes_prompt, role="system"
-                                )
-                            ],
-                            callbacks=self.callbacks,
-                        )
-                        .strip()
-                        .lower()
-                    )
-                    llm_call_successful = True
-                except Exception as e:
-                    retry_count += 1
-
-                    self._printer.print(
-                        content=f"Error during LLM call to classify human feedback: {e}. Retrying... ({retry_count}/{MAX_LLM_RETRY})",
-                        color="red",
-                    )
-
-            if not llm_call_successful:
-                self._printer.print(
-                    content="Error processing feedback after multiple attempts.",
-                    color="red",
-                )
+            response = self._get_llm_feedback_response(feedback)
+
+            if not self._feedback_requires_changes(response):
                 self.ask_for_human_input = False
-                break
-
-            if additional_changes_response == "false":
-                self.ask_for_human_input = False
-            elif additional_changes_response == "true":
-                self.ask_for_human_input = True
-                # Add human feedback to messages
-                self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
-                # Invoke the loop again with updated messages
-                formatted_answer = self._invoke_loop()
-
-                if self.crew and self.crew._train:
-                    self._handle_crew_training_output(formatted_answer)
             else:
-                # Unexpected response
-                self._printer.print(
-                    content=f"Unexpected response from LLM: '{additional_changes_response}'. Assuming no additional changes requested.",
-                    color="red",
-                )
-                self.ask_for_human_input = False
+                answer = self._process_feedback_iteration(feedback)
+                feedback = self._ask_human_input(answer.output)

+        return answer
+
+    def _get_llm_feedback_response(self, feedback: str) -> Optional[str]:
+        """Get LLM classification of whether feedback requires changes."""
+        prompt = self._i18n.slice("human_feedback_classification").format(
+            feedback=feedback
+        )
+        message = self._format_msg(prompt, role="system")
+
+        for retry in range(MAX_LLM_RETRY):
+            try:
+                response = self.llm.call([message], callbacks=self.callbacks)
+                return response.strip().lower() if response else None
+            except Exception as error:
+                self._log_feedback_error(retry, error)
+
+        self._log_max_retries_exceeded()
+        return None
+
+    def _feedback_requires_changes(self, response: Optional[str]) -> bool:
+        """Determine if feedback response indicates need for changes."""
+        return response == "true" if response else False
+
+    def _process_feedback_iteration(self, feedback: str) -> AgentFinish:
+        """Process a single feedback iteration."""
+        self.messages.append(
+            self._format_msg(
+                self._i18n.slice("feedback_instructions").format(feedback=feedback)
+            )
+        )
+        return self._invoke_loop()
+
+    def _log_feedback_error(self, retry_count: int, error: Exception) -> None:
+        """Log feedback processing errors."""
+        self._printer.print(
+            content=(
+                f"Error processing feedback: {error}. "
+                f"Retrying... ({retry_count + 1}/{MAX_LLM_RETRY})"
+            ),
+            color="red",
+        )
+
+    def _log_max_retries_exceeded(self) -> None:
+        """Log when max retries for feedback processing are exceeded."""
+        self._printer.print(
+            content=(
+                f"Failed to process feedback after {MAX_LLM_RETRY} attempts. "
+                "Ending feedback loop."
+            ),
+            color="red",
+        )
+
+    def _handle_max_iterations_exceeded(self, formatted_answer):
+        """
+        Handles the case when the maximum number of iterations is exceeded.
+        Performs one more LLM call to get the final answer.
+
+        Parameters:
+            formatted_answer: The last formatted answer from the agent.
+
+        Returns:
+            The final formatted answer after exceeding max iterations.
+        """
+        self._printer.print(
+            content="Maximum iterations reached. Requesting final answer.",
+            color="yellow",
+        )
+
+        if formatted_answer and hasattr(formatted_answer, "text"):
+            assistant_message = (
+                formatted_answer.text + f'\n{self._i18n.errors("force_final_answer")}'
+            )
+        else:
+            assistant_message = self._i18n.errors("force_final_answer")
+
+        self.messages.append(self._format_msg(assistant_message, role="assistant"))
+
+        # Perform one more LLM call to get the final answer
+        answer = self.llm.call(
+            self.messages,
+            callbacks=self.callbacks,
+        )
+
+        if answer is None or answer == "":
+            self._printer.print(
+                content="Received None or empty response from LLM call.",
+                color="red",
+            )
+            raise ValueError("Invalid response from LLM call - None or empty.")
+
+        formatted_answer = self._format_answer(answer)
+        # Return the formatted answer, regardless of its type
         return formatted_answer
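The refactor breaks one long loop into small helpers: the LLM classifies the feedback as "true"/"false", and only "true" triggers another iteration. A runnable sketch of that control flow with a stubbed LLM (helper names here are illustrative, not the library's API):

```python
from typing import Callable, Optional

MAX_LLM_RETRY = 3  # mirrors the constant used in the diff

def classify_feedback(call_llm: Callable[[str], str], feedback: str) -> Optional[str]:
    # Retry loop, as in _get_llm_feedback_response: normalize to "true"/"false".
    for _ in range(MAX_LLM_RETRY):
        try:
            response = call_llm(f"Does this feedback request changes? {feedback}")
            return response.strip().lower() if response else None
        except Exception:
            continue  # the real helper logs each retry before giving up
    return None

def requires_changes(response: Optional[str]) -> bool:
    # Mirrors _feedback_requires_changes: anything but "true" ends the loop.
    return response == "true" if response else False

# Stubbed usage: a classifier that always asks for changes.
print(requires_changes(classify_feedback(lambda _: "True ", "add citations")))
```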
@@ -1,11 +1,13 @@
+import os
 from importlib.metadata import version as get_version
-from typing import Optional
+from typing import Optional, Tuple

 import click

 from crewai.cli.add_crew_to_flow import add_crew_to_flow
 from crewai.cli.create_crew import create_crew
 from crewai.cli.create_flow import create_flow
+from crewai.cli.crew_chat import run_chat
 from crewai.memory.storage.kickoff_task_outputs_storage import (
     KickoffTaskOutputsSQLiteStorage,
 )
@@ -342,5 +344,18 @@ def flow_add_crew(crew_name):
     add_crew_to_flow(crew_name)


+@crewai.command()
+def chat():
+    """
+    Start a conversation with the Crew, collecting user-supplied inputs,
+    and using the Chat LLM to generate responses.
+    """
+    click.secho(
+        "\nStarting a conversation with the Crew\n" "Type 'exit' or Ctrl+C to quit.\n",
+    )
+
+    run_chat()
+
+
 if __name__ == "__main__":
     crewai()
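The new `chat` command is a thin wrapper over `run_chat`. A sketch of calling the same entry point directly from Python (assumes an installed crewAI and a crew project with a pyproject.toml in the working directory):

```python
# Sketch: programmatic equivalent of `crewai chat`, using the module this
# compare adds; run it from a crew project directory or it will fail fast.
from crewai.cli.crew_chat import run_chat

if __name__ == "__main__":
    run_chat()  # type 'exit' (or Ctrl+C) to leave the conversation
```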
@@ -17,6 +17,12 @@ ENV_VARS = {
             "key_name": "GEMINI_API_KEY",
         }
     ],
+    "nvidia_nim": [
+        {
+            "prompt": "Enter your NVIDIA API key (press Enter to skip)",
+            "key_name": "NVIDIA_NIM_API_KEY",
+        }
+    ],
     "groq": [
         {
             "prompt": "Enter your GROQ API key (press Enter to skip)",
@@ -85,6 +91,12 @@ ENV_VARS = {
             "key_name": "CEREBRAS_API_KEY",
        },
     ],
+    "sambanova": [
+        {
+            "prompt": "Enter your SambaNovaCloud API key (press Enter to skip)",
+            "key_name": "SAMBANOVA_API_KEY",
+        }
+    ],
 }
@@ -92,12 +104,14 @@ PROVIDERS = [
     "openai",
     "anthropic",
     "gemini",
+    "nvidia_nim",
     "groq",
     "ollama",
     "watson",
     "bedrock",
     "azure",
     "cerebras",
+    "sambanova",
 ]

 MODELS = {
@@ -114,6 +128,75 @@ MODELS = {
         "gemini/gemini-gemma-2-9b-it",
         "gemini/gemini-gemma-2-27b-it",
     ],
+    "nvidia_nim": [
+        "nvidia_nim/nvidia/mistral-nemo-minitron-8b-8k-instruct",
+        "nvidia_nim/nvidia/nemotron-4-mini-hindi-4b-instruct",
+        "nvidia_nim/nvidia/llama-3.1-nemotron-70b-instruct",
+        "nvidia_nim/nvidia/llama3-chatqa-1.5-8b",
+        "nvidia_nim/nvidia/llama3-chatqa-1.5-70b",
+        "nvidia_nim/nvidia/vila",
+        "nvidia_nim/nvidia/neva-22",
+        "nvidia_nim/nvidia/nemotron-mini-4b-instruct",
+        "nvidia_nim/nvidia/usdcode-llama3-70b-instruct",
+        "nvidia_nim/nvidia/nemotron-4-340b-instruct",
+        "nvidia_nim/meta/codellama-70b",
+        "nvidia_nim/meta/llama2-70b",
+        "nvidia_nim/meta/llama3-8b-instruct",
+        "nvidia_nim/meta/llama3-70b-instruct",
+        "nvidia_nim/meta/llama-3.1-8b-instruct",
+        "nvidia_nim/meta/llama-3.1-70b-instruct",
+        "nvidia_nim/meta/llama-3.1-405b-instruct",
+        "nvidia_nim/meta/llama-3.2-1b-instruct",
+        "nvidia_nim/meta/llama-3.2-3b-instruct",
+        "nvidia_nim/meta/llama-3.2-11b-vision-instruct",
+        "nvidia_nim/meta/llama-3.2-90b-vision-instruct",
+        "nvidia_nim/meta/llama-3.1-70b-instruct",
+        "nvidia_nim/google/gemma-7b",
+        "nvidia_nim/google/gemma-2b",
+        "nvidia_nim/google/codegemma-7b",
+        "nvidia_nim/google/codegemma-1.1-7b",
+        "nvidia_nim/google/recurrentgemma-2b",
+        "nvidia_nim/google/gemma-2-9b-it",
+        "nvidia_nim/google/gemma-2-27b-it",
+        "nvidia_nim/google/gemma-2-2b-it",
+        "nvidia_nim/google/deplot",
+        "nvidia_nim/google/paligemma",
+        "nvidia_nim/mistralai/mistral-7b-instruct-v0.2",
+        "nvidia_nim/mistralai/mixtral-8x7b-instruct-v0.1",
+        "nvidia_nim/mistralai/mistral-large",
+        "nvidia_nim/mistralai/mixtral-8x22b-instruct-v0.1",
+        "nvidia_nim/mistralai/mistral-7b-instruct-v0.3",
+        "nvidia_nim/nv-mistralai/mistral-nemo-12b-instruct",
+        "nvidia_nim/mistralai/mamba-codestral-7b-v0.1",
+        "nvidia_nim/microsoft/phi-3-mini-128k-instruct",
+        "nvidia_nim/microsoft/phi-3-mini-4k-instruct",
+        "nvidia_nim/microsoft/phi-3-small-8k-instruct",
+        "nvidia_nim/microsoft/phi-3-small-128k-instruct",
+        "nvidia_nim/microsoft/phi-3-medium-4k-instruct",
+        "nvidia_nim/microsoft/phi-3-medium-128k-instruct",
+        "nvidia_nim/microsoft/phi-3.5-mini-instruct",
+        "nvidia_nim/microsoft/phi-3.5-moe-instruct",
+        "nvidia_nim/microsoft/kosmos-2",
+        "nvidia_nim/microsoft/phi-3-vision-128k-instruct",
+        "nvidia_nim/microsoft/phi-3.5-vision-instruct",
+        "nvidia_nim/databricks/dbrx-instruct",
+        "nvidia_nim/snowflake/arctic",
+        "nvidia_nim/aisingapore/sea-lion-7b-instruct",
+        "nvidia_nim/ibm/granite-8b-code-instruct",
+        "nvidia_nim/ibm/granite-34b-code-instruct",
+        "nvidia_nim/ibm/granite-3.0-8b-instruct",
+        "nvidia_nim/ibm/granite-3.0-3b-a800m-instruct",
+        "nvidia_nim/mediatek/breeze-7b-instruct",
+        "nvidia_nim/upstage/solar-10.7b-instruct",
+        "nvidia_nim/writer/palmyra-med-70b-32k",
+        "nvidia_nim/writer/palmyra-med-70b",
+        "nvidia_nim/writer/palmyra-fin-70b-32k",
+        "nvidia_nim/01-ai/yi-large",
+        "nvidia_nim/deepseek-ai/deepseek-coder-6.7b-instruct",
+        "nvidia_nim/rakuten/rakutenai-7b-instruct",
+        "nvidia_nim/rakuten/rakutenai-7b-chat",
+        "nvidia_nim/baichuan-inc/baichuan2-13b-chat",
+    ],
     "groq": [
         "groq/llama-3.1-8b-instant",
         "groq/llama-3.1-70b-versatile",
@@ -156,8 +239,23 @@ MODELS = {
         "bedrock/mistral.mistral-7b-instruct-v0:2",
         "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
     ],
+    "sambanova": [
+        "sambanova/Meta-Llama-3.3-70B-Instruct",
+        "sambanova/QwQ-32B-Preview",
+        "sambanova/Qwen2.5-72B-Instruct",
+        "sambanova/Qwen2.5-Coder-32B-Instruct",
+        "sambanova/Meta-Llama-3.1-405B-Instruct",
+        "sambanova/Meta-Llama-3.1-70B-Instruct",
+        "sambanova/Meta-Llama-3.1-8B-Instruct",
+        "sambanova/Llama-3.2-90B-Vision-Instruct",
+        "sambanova/Llama-3.2-11B-Vision-Instruct",
+        "sambanova/Meta-Llama-3.2-3B-Instruct",
+        "sambanova/Meta-Llama-3.2-1B-Instruct",
+    ],
 }

+DEFAULT_LLM_MODEL = "gpt-4o-mini"
+
 JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
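The provider and model entries follow the LiteLLM-style `provider/model` naming (the JSON_URL above points at LiteLLM's price table), so a newly listed id can be passed straight to crewAI's LLM wrapper. A sketch, assuming the API keys from the ENV_VARS prompts are set:

```python
# Sketch: using two of the newly listed models; requires NVIDIA_NIM_API_KEY
# and SAMBANOVA_API_KEY in the environment, as prompted by the CLI.
from crewai import LLM

nim = LLM(model="nvidia_nim/meta/llama-3.1-8b-instruct")
samba = LLM(model="sambanova/Meta-Llama-3.1-8B-Instruct")
print(nim.model, samba.model)
```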
src/crewai/cli/crew_chat.py (new file, 536 lines)
@@ -0,0 +1,536 @@
+import json
+import platform
+import re
+import sys
+import threading
+import time
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set, Tuple
+
+import click
+import tomli
+from packaging import version
+
+from crewai.cli.utils import read_toml
+from crewai.cli.version import get_crewai_version
+from crewai.crew import Crew
+from crewai.llm import LLM
+from crewai.types.crew_chat import ChatInputField, ChatInputs
+from crewai.utilities.llm_utils import create_llm
+
+MIN_REQUIRED_VERSION = "0.98.0"
+
+
+def check_conversational_crews_version(
+    crewai_version: str, pyproject_data: dict
+) -> bool:
+    """
+    Check if the installed crewAI version supports conversational crews.
+
+    Args:
+        crewai_version: The current version of crewAI.
+        pyproject_data: Dictionary containing pyproject.toml data.
+
+    Returns:
+        bool: True if version check passes, False otherwise.
+    """
+    try:
+        if version.parse(crewai_version) < version.parse(MIN_REQUIRED_VERSION):
+            click.secho(
+                "You are using an older version of crewAI that doesn't support conversational crews. "
+                "Run 'uv upgrade crewai' to get the latest version.",
+                fg="red",
+            )
+            return False
+    except version.InvalidVersion:
+        click.secho("Invalid crewAI version format detected.", fg="red")
+        return False
+    return True
+
+
+def run_chat():
+    """
+    Runs an interactive chat loop using the Crew's chat LLM with function calling.
+    Incorporates crew_name, crew_description, and input fields to build a tool schema.
+    Exits if crew_name or crew_description are missing.
+    """
+    crewai_version = get_crewai_version()
+    pyproject_data = read_toml()
+
+    if not check_conversational_crews_version(crewai_version, pyproject_data):
+        return
+
+    crew, crew_name = load_crew_and_name()
+    chat_llm = initialize_chat_llm(crew)
+    if not chat_llm:
+        return
+
+    # Indicate that the crew is being analyzed
+    click.secho(
+        "\nAnalyzing crew and required inputs - this may take 3 to 30 seconds "
+        "depending on the complexity of your crew.",
+        fg="white",
+    )
+
+    # Start loading indicator
+    loading_complete = threading.Event()
+    loading_thread = threading.Thread(target=show_loading, args=(loading_complete,))
+    loading_thread.start()
+
+    try:
+        crew_chat_inputs = generate_crew_chat_inputs(crew, crew_name, chat_llm)
+        crew_tool_schema = generate_crew_tool_schema(crew_chat_inputs)
+        system_message = build_system_message(crew_chat_inputs)
+
+        # Call the LLM to generate the introductory message
+        introductory_message = chat_llm.call(
+            messages=[{"role": "system", "content": system_message}]
+        )
+    finally:
+        # Stop loading indicator
+        loading_complete.set()
+        loading_thread.join()
+
+    # Indicate that the analysis is complete
+    click.secho("\nFinished analyzing crew.\n", fg="white")
+
+    click.secho(f"Assistant: {introductory_message}\n", fg="green")
+
+    messages = [
+        {"role": "system", "content": system_message},
+        {"role": "assistant", "content": introductory_message},
+    ]
+
+    available_functions = {
+        crew_chat_inputs.crew_name: create_tool_function(crew, messages),
+    }
+
+    chat_loop(chat_llm, messages, crew_tool_schema, available_functions)
+
+
+def show_loading(event: threading.Event):
+    """Display animated loading dots while processing."""
+    while not event.is_set():
+        print(".", end="", flush=True)
+        time.sleep(1)
+    print()
+
+
+def initialize_chat_llm(crew: Crew) -> Optional[LLM]:
+    """Initializes the chat LLM and handles exceptions."""
+    try:
+        return create_llm(crew.chat_llm)
+    except Exception as e:
+        click.secho(
+            f"Unable to find a Chat LLM. Please make sure you set chat_llm on the crew: {e}",
+            fg="red",
+        )
+        return None
+
+
+def build_system_message(crew_chat_inputs: ChatInputs) -> str:
+    """Builds the initial system message for the chat."""
+    required_fields_str = (
+        ", ".join(
+            f"{field.name} (desc: {field.description or 'n/a'})"
+            for field in crew_chat_inputs.inputs
+        )
+        or "(No required fields detected)"
+    )
+
+    return (
+        "You are a helpful AI assistant for the CrewAI platform. "
+        "Your primary purpose is to assist users with the crew's specific tasks. "
+        "You can answer general questions, but should guide users back to the crew's purpose afterward. "
+        "For example, after answering a general question, remind the user of your main purpose, such as generating a research report, and prompt them to specify a topic or task related to the crew's purpose. "
+        "You have a function (tool) you can call by name if you have all required inputs. "
+        f"Those required inputs are: {required_fields_str}. "
+        "Once you have them, call the function. "
+        "Please keep your responses concise and friendly. "
+        "If a user asks a question outside the crew's scope, provide a brief answer and remind them of the crew's purpose. "
+        "After calling the tool, be prepared to take user feedback and make adjustments as needed. "
+        "If you are ever unsure about a user's request or need clarification, ask the user for more information. "
+        "Before doing anything else, introduce yourself with a friendly message like: 'Hey! I'm here to help you with [crew's purpose]. Could you please provide me with [inputs] so we can get started?' "
+        "For example: 'Hey! I'm here to help you with uncovering and reporting cutting-edge developments through thorough research and detailed analysis. Could you please provide me with a topic you're interested in? This will help us generate a comprehensive research report and detailed analysis.'"
+        f"\nCrew Name: {crew_chat_inputs.crew_name}"
+        f"\nCrew Description: {crew_chat_inputs.crew_description}"
+    )
+
+
+def create_tool_function(crew: Crew, messages: List[Dict[str, str]]) -> Any:
+    """Creates a wrapper function for running the crew tool with messages."""
+
+    def run_crew_tool_with_messages(**kwargs):
+        return run_crew_tool(crew, messages, **kwargs)
+
+    return run_crew_tool_with_messages
+
+
+def flush_input():
+    """Flush any pending input from the user."""
+    if platform.system() == "Windows":
+        # Windows platform
+        import msvcrt
+
+        while msvcrt.kbhit():
+            msvcrt.getch()
+    else:
+        # Unix-like platforms (Linux, macOS)
+        import termios
+
+        termios.tcflush(sys.stdin, termios.TCIFLUSH)
+
+
+def chat_loop(chat_llm, messages, crew_tool_schema, available_functions):
+    """Main chat loop for interacting with the user."""
+    while True:
+        try:
+            # Flush any pending input before accepting new input
+            flush_input()
+
+            user_input = get_user_input()
+            handle_user_input(
+                user_input, chat_llm, messages, crew_tool_schema, available_functions
+            )
+
+        except KeyboardInterrupt:
+            click.echo("\nExiting chat. Goodbye!")
+            break
+        except Exception as e:
+            click.secho(f"An error occurred: {e}", fg="red")
+            break
+
+
+def get_user_input() -> str:
+    """Collect multi-line user input with exit handling."""
+    click.secho(
+        "\nYou (type your message below. Press 'Enter' twice when you're done):",
+        fg="blue",
+    )
+    user_input_lines = []
+    while True:
+        line = input()
+        if line.strip().lower() == "exit":
+            return "exit"
+        if line == "":
+            break
+        user_input_lines.append(line)
+    return "\n".join(user_input_lines)
+
+
+def handle_user_input(
+    user_input: str,
+    chat_llm: LLM,
+    messages: List[Dict[str, str]],
+    crew_tool_schema: Dict[str, Any],
+    available_functions: Dict[str, Any],
+) -> None:
+    if user_input.strip().lower() == "exit":
+        click.echo("Exiting chat. Goodbye!")
+        return
+
+    if not user_input.strip():
+        click.echo("Empty message. Please provide input or type 'exit' to quit.")
+        return
+
+    messages.append({"role": "user", "content": user_input})
+
+    # Indicate that assistant is processing
+    click.echo()
+    click.secho("Assistant is processing your input. Please wait...", fg="green")
+
+    # Process assistant's response
+    final_response = chat_llm.call(
+        messages=messages,
+        tools=[crew_tool_schema],
+        available_functions=available_functions,
+    )
+
+    messages.append({"role": "assistant", "content": final_response})
+    click.secho(f"\nAssistant: {final_response}\n", fg="green")
+
+
+def generate_crew_tool_schema(crew_inputs: ChatInputs) -> dict:
+    """
+    Dynamically build a LiteLLM 'function' schema for the given crew.
+
+    crew_name: The name of the crew (used for the function 'name').
+    crew_inputs: A ChatInputs object containing crew_description
+    and a list of input fields (each with a name & description).
+    """
+    properties = {}
+    for field in crew_inputs.inputs:
+        properties[field.name] = {
+            "type": "string",
+            "description": field.description or "No description provided",
+        }
+
+    required_fields = [field.name for field in crew_inputs.inputs]
+
+    return {
+        "type": "function",
+        "function": {
+            "name": crew_inputs.crew_name,
+            "description": crew_inputs.crew_description or "No crew description",
+            "parameters": {
+                "type": "object",
+                "properties": properties,
+                "required": required_fields,
+            },
+        },
+    }
+
+
+def run_crew_tool(crew: Crew, messages: List[Dict[str, str]], **kwargs):
+    """
+    Runs the crew using crew.kickoff(inputs=kwargs) and returns the output.
+
+    Args:
+        crew (Crew): The crew instance to run.
+        messages (List[Dict[str, str]]): The chat messages up to this point.
+        **kwargs: The inputs collected from the user.
+
+    Returns:
+        str: The output from the crew's execution.
+
+    Raises:
+        SystemExit: Exits the chat if an error occurs during crew execution.
+    """
+    try:
+        # Serialize 'messages' to JSON string before adding to kwargs
+        kwargs["crew_chat_messages"] = json.dumps(messages)
+
+        # Run the crew with the provided inputs
+        crew_output = crew.kickoff(inputs=kwargs)
+
+        # Convert CrewOutput to a string to send back to the user
+        result = str(crew_output)
+
+        return result
+    except Exception as e:
+        # Exit the chat and show the error message
+        click.secho("An error occurred while running the crew:", fg="red")
+        click.secho(str(e), fg="red")
+        sys.exit(1)
+
+
+def load_crew_and_name() -> Tuple[Crew, str]:
+    """
+    Loads the crew by importing the crew class from the user's project.
+
+    Returns:
+        Tuple[Crew, str]: A tuple containing the Crew instance and the name of the crew.
+    """
+    # Get the current working directory
+    cwd = Path.cwd()
+
+    # Path to the pyproject.toml file
+    pyproject_path = cwd / "pyproject.toml"
+    if not pyproject_path.exists():
+        raise FileNotFoundError("pyproject.toml not found in the current directory.")
+
+    # Load the pyproject.toml file using 'tomli'
+    with pyproject_path.open("rb") as f:
+        pyproject_data = tomli.load(f)
+
+    # Get the project name from the 'project' section
+    project_name = pyproject_data["project"]["name"]
+    folder_name = project_name
+
+    # Derive the crew class name from the project name
+    # E.g., if project_name is 'my_project', crew_class_name is 'MyProject'
+    crew_class_name = project_name.replace("_", " ").title().replace(" ", "")
+
+    # Add the 'src' directory to sys.path
+    src_path = cwd / "src"
+    if str(src_path) not in sys.path:
+        sys.path.insert(0, str(src_path))
+
+    # Import the crew module
+    crew_module_name = f"{folder_name}.crew"
+    try:
+        crew_module = __import__(crew_module_name, fromlist=[crew_class_name])
+    except ImportError as e:
+        raise ImportError(f"Failed to import crew module {crew_module_name}: {e}")
+
+    # Get the crew class from the module
+    try:
+        crew_class = getattr(crew_module, crew_class_name)
+    except AttributeError:
+        raise AttributeError(
+            f"Crew class {crew_class_name} not found in module {crew_module_name}"
+        )
+
+    # Instantiate the crew
+    crew_instance = crew_class().crew()
+    return crew_instance, crew_class_name
+
+
+def generate_crew_chat_inputs(crew: Crew, crew_name: str, chat_llm) -> ChatInputs:
+    """
+    Generates the ChatInputs required for the crew by analyzing the tasks and agents.
+
+    Args:
+        crew (Crew): The crew object containing tasks and agents.
+        crew_name (str): The name of the crew.
+        chat_llm: The chat language model to use for AI calls.
+
+    Returns:
+        ChatInputs: An object containing the crew's name, description, and input fields.
+    """
+    # Extract placeholders from tasks and agents
+    required_inputs = fetch_required_inputs(crew)
+
+    # Generate descriptions for each input using AI
+    input_fields = []
+    for input_name in required_inputs:
+        description = generate_input_description_with_ai(input_name, crew, chat_llm)
+        input_fields.append(ChatInputField(name=input_name, description=description))
+
+    # Generate crew description using AI
+    crew_description = generate_crew_description_with_ai(crew, chat_llm)
+
+    return ChatInputs(
+        crew_name=crew_name, crew_description=crew_description, inputs=input_fields
+    )
+
+
+def fetch_required_inputs(crew: Crew) -> Set[str]:
+    """
+    Extracts placeholders from the crew's tasks and agents.
+
+    Args:
+        crew (Crew): The crew object.
+
+    Returns:
+        Set[str]: A set of placeholder names.
+    """
+    placeholder_pattern = re.compile(r"\{(.+?)\}")
+    required_inputs: Set[str] = set()
+
+    # Scan tasks
+    for task in crew.tasks:
+        text = f"{task.description or ''} {task.expected_output or ''}"
+        required_inputs.update(placeholder_pattern.findall(text))
+
+    # Scan agents
+    for agent in crew.agents:
+        text = f"{agent.role or ''} {agent.goal or ''} {agent.backstory or ''}"
+        required_inputs.update(placeholder_pattern.findall(text))
+
+    return required_inputs
+
+
+def generate_input_description_with_ai(input_name: str, crew: Crew, chat_llm) -> str:
+    """
+    Generates an input description using AI based on the context of the crew.
+
+    Args:
+        input_name (str): The name of the input placeholder.
+        crew (Crew): The crew object.
+        chat_llm: The chat language model to use for AI calls.
+
+    Returns:
+        str: A concise description of the input.
+    """
+    # Gather context from tasks and agents where the input is used
+    context_texts = []
+    placeholder_pattern = re.compile(r"\{(.+?)\}")
+
+    for task in crew.tasks:
+        if (
+            f"{{{input_name}}}" in task.description
+            or f"{{{input_name}}}" in task.expected_output
+        ):
+            # Replace placeholders with input names
+            task_description = placeholder_pattern.sub(
+                lambda m: m.group(1), task.description or ""
+            )
+            expected_output = placeholder_pattern.sub(
+                lambda m: m.group(1), task.expected_output or ""
+            )
+            context_texts.append(f"Task Description: {task_description}")
+            context_texts.append(f"Expected Output: {expected_output}")
+    for agent in crew.agents:
+        if (
+            f"{{{input_name}}}" in agent.role
+            or f"{{{input_name}}}" in agent.goal
+            or f"{{{input_name}}}" in agent.backstory
+        ):
+            # Replace placeholders with input names
+            agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role or "")
+            agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal or "")
+            agent_backstory = placeholder_pattern.sub(
+                lambda m: m.group(1), agent.backstory or ""
+            )
+            context_texts.append(f"Agent Role: {agent_role}")
+            context_texts.append(f"Agent Goal: {agent_goal}")
+            context_texts.append(f"Agent Backstory: {agent_backstory}")
+
+    context = "\n".join(context_texts)
+    if not context:
+        # If no context is found for the input, raise an exception as per instruction
+        raise ValueError(f"No context found for input '{input_name}'.")
+
+    prompt = (
+        f"Based on the following context, write a concise description (15 words or less) of the input '{input_name}'.\n"
+        "Provide only the description, without any extra text or labels. Do not include placeholders like '{topic}' in the description.\n"
+        "Context:\n"
+        f"{context}"
+    )
+    response = chat_llm.call(messages=[{"role": "user", "content": prompt}])
+    description = response.strip()
+
+    return description
+
+
+def generate_crew_description_with_ai(crew: Crew, chat_llm) -> str:
+    """
+    Generates a brief description of the crew using AI.
+
+    Args:
+        crew (Crew): The crew object.
+        chat_llm: The chat language model to use for AI calls.
+
+    Returns:
+        str: A concise description of the crew's purpose (15 words or less).
+    """
+    # Gather context from tasks and agents
+    context_texts = []
+    placeholder_pattern = re.compile(r"\{(.+?)\}")
+
+    for task in crew.tasks:
+        # Replace placeholders with input names
+        task_description = placeholder_pattern.sub(
+            lambda m: m.group(1), task.description or ""
+        )
+        expected_output = placeholder_pattern.sub(
+            lambda m: m.group(1), task.expected_output or ""
+        )
+        context_texts.append(f"Task Description: {task_description}")
+        context_texts.append(f"Expected Output: {expected_output}")
+    for agent in crew.agents:
+        # Replace placeholders with input names
+        agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role or "")
+        agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal or "")
+        agent_backstory = placeholder_pattern.sub(
+            lambda m: m.group(1), agent.backstory or ""
+        )
+        context_texts.append(f"Agent Role: {agent_role}")
+        context_texts.append(f"Agent Goal: {agent_goal}")
+        context_texts.append(f"Agent Backstory: {agent_backstory}")
+
+    context = "\n".join(context_texts)
+    if not context:
+        raise ValueError("No context found for generating crew description.")
+
+    prompt = (
+        "Based on the following context, write a concise, action-oriented description (15 words or less) of the crew's purpose.\n"
+        "Provide only the description, without any extra text or labels. Do not include placeholders like '{topic}' in the description.\n"
+        "Context:\n"
+        f"{context}"
+    )
+    response = chat_llm.call(messages=[{"role": "user", "content": prompt}])
+    crew_description = response.strip()

+    return crew_description
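For a crew with a single `topic` placeholder, `generate_crew_tool_schema` yields a function-calling schema like the sketch below (the name and description values are illustrative; in practice they come from the AI calls above):

```python
# Illustrative output of generate_crew_tool_schema for a hypothetical
# "ResearchCrew" with one required `topic` input.
schema = {
    "type": "function",
    "function": {
        "name": "ResearchCrew",
        "description": "Research a topic and produce a report",
        "parameters": {
            "type": "object",
            "properties": {
                "topic": {"type": "string", "description": "Topic to research"},
            },
            "required": ["topic"],
        },
    },
}
print(schema["function"]["parameters"]["required"])  # ['topic']
```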
@@ -2,11 +2,7 @@ import subprocess

 import click

-from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
-from crewai.memory.entity.entity_memory import EntityMemory
-from crewai.memory.long_term.long_term_memory import LongTermMemory
-from crewai.memory.short_term.short_term_memory import ShortTermMemory
-from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
+from crewai.cli.utils import get_crew


 def reset_memories_command(
@@ -30,30 +26,35 @@ def reset_memories_command(
     """

     try:
+        crew = get_crew()
+        if not crew:
+            raise ValueError("No crew found.")
         if all:
-            ShortTermMemory().reset()
-            EntityMemory().reset()
-            LongTermMemory().reset()
-            TaskOutputStorageHandler().reset()
-            KnowledgeStorage().reset()
+            crew.reset_memories(command_type="all")
             click.echo("All memories have been reset.")
-        else:
-            if long:
-                LongTermMemory().reset()
-                click.echo("Long term memory has been reset.")
+            return

-            if short:
-                ShortTermMemory().reset()
-                click.echo("Short term memory has been reset.")
-            if entity:
-                EntityMemory().reset()
-                click.echo("Entity memory has been reset.")
-            if kickoff_outputs:
-                TaskOutputStorageHandler().reset()
-                click.echo("Latest Kickoff outputs stored has been reset.")
-            if knowledge:
-                KnowledgeStorage().reset()
-                click.echo("Knowledge has been reset.")
+        if not any([long, short, entity, kickoff_outputs, knowledge]):
+            click.echo(
+                "No memory type specified. Please specify at least one type to reset."
+            )
+            return
+
+        if long:
+            crew.reset_memories(command_type="long")
+            click.echo("Long term memory has been reset.")
+        if short:
+            crew.reset_memories(command_type="short")
+            click.echo("Short term memory has been reset.")
+        if entity:
+            crew.reset_memories(command_type="entity")
+            click.echo("Entity memory has been reset.")
+        if kickoff_outputs:
+            crew.reset_memories(command_type="kickoff_outputs")
+            click.echo("Latest Kickoff outputs stored has been reset.")
+        if knowledge:
+            crew.reset_memories(command_type="knowledge")
+            click.echo("Knowledge has been reset.")

     except subprocess.CalledProcessError as e:
         click.echo(f"An error occurred while resetting the memories: {e}", err=True)
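Resets now go through `Crew.reset_memories` instead of instantiating each storage class directly. A sketch of the same call from user code (assumes a crew project discoverable by `get_crew`, which is added later in this compare):

```python
# Sketch: reset a single memory type the way the CLI now does.
from crewai.cli.utils import get_crew

crew = get_crew()
if crew:
    # command_type accepts "long", "short", "entity", "kickoff_outputs",
    # "knowledge", or "all", matching the CLI flags above.
    crew.reset_memories(command_type="short")
```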
src/crewai/cli/templates/crew/.gitignore (vendored)
@@ -1,2 +1,3 @@
 .env
 __pycache__/
+.DS_Store
@@ -2,7 +2,7 @@ research_task:
   description: >
     Conduct a thorough research about {topic}
     Make sure you find any interesting and relevant information given
-    the current year is 2024.
+    the current year is {current_year}.
   expected_output: >
     A list with 10 bullet points of the most relevant information about {topic}
   agent: researcher
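The template now leaves the year as a `{current_year}` placeholder that kickoff inputs fill in, instead of a hard-coded 2024. A sketch of the interpolation (plain `str.format`, mirroring how task placeholders are substituted):

```python
# Sketch: how the {current_year} placeholder gets its value at kickoff time.
from datetime import datetime

description = (
    "Conduct a thorough research about {topic}. "
    "Make sure you find any interesting and relevant information given "
    "the current year is {current_year}."
)
inputs = {"topic": "AI LLMs", "current_year": str(datetime.now().year)}
print(description.format(**inputs))
```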
@@ -2,6 +2,8 @@
 import sys
 import warnings

+from datetime import datetime
+
 from {{folder_name}}.crew import {{crew_name}}

 warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
@@ -16,9 +18,14 @@ def run():
     Run the crew.
     """
     inputs = {
-        'topic': 'AI LLMs'
+        'topic': 'AI LLMs',
+        'current_year': str(datetime.now().year)
     }
-    {{crew_name}}().crew().kickoff(inputs=inputs)
+
+    try:
+        {{crew_name}}().crew().kickoff(inputs=inputs)
+    except Exception as e:
+        raise Exception(f"An error occurred while running the crew: {e}")


 def train():
@@ -55,4 +62,4 @@ def test():
         {{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)

     except Exception as e:
-        raise Exception(f"An error occurred while replaying the crew: {e}")
+        raise Exception(f"An error occurred while testing the crew: {e}")
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.86.0,<1.0.0"
+    "crewai[tools]>=0.100.1,<1.0.0"
 ]

 [project.scripts]
src/crewai/cli/templates/flow/.gitignore (vendored)
@@ -1,3 +1,4 @@
 .env
 __pycache__/
 lib/
+.DS_Store
@@ -3,7 +3,7 @@ from random import randint

 from pydantic import BaseModel

-from crewai.flow.flow import Flow, listen, start
+from crewai.flow import Flow, listen, start

 from {{folder_name}}.crews.poem_crew.poem_crew import PoemCrew

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.86.0,<1.0.0",
+    "crewai[tools]>=0.100.1,<1.0.0",
 ]

 [project.scripts]
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
|
|||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
requires-python = ">=3.10,<3.13"
|
requires-python = ">=3.10,<3.13"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"crewai[tools]>=0.86.0"
|
"crewai[tools]>=0.100.1"
|
||||||
]
|
]
|
||||||
|
|
||||||
[tool.crewai]
|
[tool.crewai]
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import tomli
 from rich.console import Console

 from crewai.cli.constants import ENV_VARS
+from crewai.crew import Crew

 if sys.version_info >= (3, 11):
     import tomllib
@@ -247,3 +248,64 @@ def write_env_file(folder_path, env_vars):
     with open(env_file_path, "w") as file:
         for key, value in env_vars.items():
             file.write(f"{key}={value}\n")
+
+
+def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
+    """Get the crew instance from the crew.py file."""
+    try:
+        import importlib.util
+        import os
+
+        for root, _, files in os.walk("."):
+            if "crew.py" in files:
+                crew_path = os.path.join(root, "crew.py")
+                try:
+                    spec = importlib.util.spec_from_file_location(
+                        "crew_module", crew_path
+                    )
+                    if not spec or not spec.loader:
+                        continue
+                    module = importlib.util.module_from_spec(spec)
+                    try:
+                        sys.modules[spec.name] = module
+                        spec.loader.exec_module(module)
+
+                        for attr_name in dir(module):
+                            attr = getattr(module, attr_name)
+                            try:
+                                if callable(attr) and hasattr(attr, "crew"):
+                                    crew_instance = attr().crew()
+                                    return crew_instance
+
+                            except Exception as e:
+                                print(f"Error processing attribute {attr_name}: {e}")
+                                continue
+
+                    except Exception as exec_error:
+                        print(f"Error executing module: {exec_error}")
+                        import traceback
+
+                        print(f"Traceback: {traceback.format_exc()}")
+
+                except (ImportError, AttributeError) as e:
+                    if require:
+                        console.print(
+                            f"Error importing crew from {crew_path}: {str(e)}",
+                            style="bold red",
+                        )
+                    continue
+
+                break
+
+        if require:
+            console.print("No valid Crew instance found in crew.py", style="bold red")
+            raise SystemExit
+        return None
+
+    except Exception as e:
+        if require:
+            console.print(
+                f"Unexpected error while loading crew: {str(e)}", style="bold red"
+            )
+            raise SystemExit
+        return None
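`get_crew` walks the working tree for a `crew.py`, imports it, and returns the first object exposing a `.crew()` factory; with `require=True` a miss aborts the CLI command via `SystemExit`. A sketch of calling it directly:

```python
# Sketch: locating the project's Crew the way the new CLI helpers do.
from crewai.cli.utils import get_crew

crew = get_crew(require=False)
print(crew if crew else "No crew.py found under the current directory")
```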
@@ -4,7 +4,7 @@ import uuid
 import warnings
 from concurrent.futures import Future
 from hashlib import md5
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union

 from pydantic import (
     UUID4,
|
|||||||
field_validator,
|
field_validator,
|
||||||
model_validator,
|
model_validator,
|
||||||
)
|
)
|
||||||
from pydantic_core import PydanticCustomError
|
|
||||||
|
|
||||||
from crewai.agent import Agent
|
from crewai.agent import Agent
|
||||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||||
@@ -45,6 +44,7 @@ from crewai.utilities.formatter import (
     aggregate_raw_outputs_from_task_outputs,
     aggregate_raw_outputs_from_tasks,
 )
+from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.planning_handler import CrewPlanner
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
 from crewai.utilities.training_handler import CrewTrainingHandler
@@ -81,6 +81,7 @@ class Crew(BaseModel):
         step_callback: Callback to be executed after each step for every agents execution.
         share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
         planning: Plan the crew execution and add the plan to the crew.
+        chat_llm: The language model used for orchestrating chat interactions with the crew.
     """

     __hash__ = object.__hash__  # type: ignore
@@ -147,7 +148,7 @@ class Crew(BaseModel):
     manager_agent: Optional[BaseAgent] = Field(
         description="Custom agent that will be used as manager.", default=None
     )
-    function_calling_llm: Optional[Any] = Field(
+    function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
         description="Language model that will run the agent.", default=None
     )
     config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
@@ -179,9 +180,9 @@ class Crew(BaseModel):
         default=None,
         description="Path to the prompt json file to be used for the crew.",
     )
-    output_log_file: Optional[str] = Field(
+    output_log_file: Optional[Union[bool, str]] = Field(
         default=None,
-        description="output_log_file",
+        description="Path to the log file to be saved",
     )
     planning: Optional[bool] = Field(
         default=False,
@@ -203,8 +204,13 @@ class Crew(BaseModel):
         default=None,
         description="Knowledge sources for the crew. Add knowledge sources to the knowledge object.",
     )
-    _knowledge: Optional[Knowledge] = PrivateAttr(
+    chat_llm: Optional[Any] = Field(
         default=None,
+        description="LLM used to handle chatting with the crew.",
+    )
+    knowledge: Optional[Knowledge] = Field(
+        default=None,
+        description="Knowledge for the crew.",
     )

     @field_validator("id", mode="before")
@@ -239,15 +245,9 @@ class Crew(BaseModel):
         if self.output_log_file:
             self._file_handler = FileHandler(self.output_log_file)
         self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
-        if self.function_calling_llm:
-            if isinstance(self.function_calling_llm, str):
-                self.function_calling_llm = LLM(model=self.function_calling_llm)
-            elif not isinstance(self.function_calling_llm, LLM):
-                self.function_calling_llm = LLM(
-                    model=getattr(self.function_calling_llm, "model_name", None)
-                    or getattr(self.function_calling_llm, "deployment_name", None)
-                    or str(self.function_calling_llm)
-                )
+        if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
+            self.function_calling_llm = create_llm(self.function_calling_llm)
         self._telemetry = Telemetry()
         self._telemetry.set_tracer()
         return self
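`create_llm` replaces the inline `model_name`/`deployment_name` probing: strings and LLM-like objects are normalized to a crewAI `LLM`. A sketch under the assumption (suggested by the `isinstance` guard above) that existing `LLM` instances pass through unchanged:

```python
# Sketch: what create_llm normalizes; the pass-through behavior for LLM
# instances is an assumption inferred from the diff, not a documented API.
from crewai.llm import LLM
from crewai.utilities.llm_utils import create_llm

from_string = create_llm("gpt-4o-mini")
print(isinstance(from_string, LLM))          # expected: True

already_llm = LLM(model="gpt-4o-mini")
print(create_llm(already_llm) is already_llm)  # expected: True
```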
@@ -288,9 +288,9 @@ class Crew(BaseModel):
         if isinstance(self.knowledge_sources, list) and all(
             isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
         ):
-            self._knowledge = Knowledge(
+            self.knowledge = Knowledge(
                 sources=self.knowledge_sources,
-                embedder_config=self.embedder,
+                embedder=self.embedder,
                 collection_name="crew",
             )

@@ -377,6 +377,22 @@ class Crew(BaseModel):

         return self

+    @model_validator(mode="after")
+    def validate_must_have_non_conditional_task(self) -> "Crew":
+        """Ensure that a crew has at least one non-conditional task."""
+        if not self.tasks:
+            return self
+        non_conditional_count = sum(
+            1 for task in self.tasks if not isinstance(task, ConditionalTask)
+        )
+        if non_conditional_count == 0:
+            raise PydanticCustomError(
+                "only_conditional_tasks",
+                "Crew must include at least one non-conditional task",
+                {},
+            )
+        return self
+
     @model_validator(mode="after")
     def validate_first_task(self) -> "Crew":
         """Ensure the first task is not a ConditionalTask."""
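The new validator rejects crews built only from conditional tasks. A standalone sketch of the same check (helper names are illustrative; the real version runs as a pydantic model validator on `Crew`):

```python
# Sketch of the validator's logic outside the model.
from pydantic_core import PydanticCustomError

def ensure_non_conditional(tasks, is_conditional) -> None:
    # Raise the same structured error the Crew validator raises.
    if tasks and all(is_conditional(t) for t in tasks):
        raise PydanticCustomError(
            "only_conditional_tasks",
            "Crew must include at least one non-conditional task",
            {},
        )

ensure_non_conditional(["t1"], is_conditional=lambda t: False)  # passes silently
```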
@@ -436,6 +452,8 @@ class Crew(BaseModel):
         )
         return self

+
+
     @property
     def key(self) -> str:
         source = [agent.key for agent in self.agents] + [
@@ -491,27 +509,34 @@
         train_crew = self.copy()
         train_crew._setup_for_training(filename)

-        for n_iteration in range(n_iterations):
-            train_crew._train_iteration = n_iteration
-            train_crew.kickoff(inputs=inputs)
+        try:
+            for n_iteration in range(n_iterations):
+                train_crew._train_iteration = n_iteration
+                train_crew.kickoff(inputs=inputs)

-        training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
+            training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()

-        for agent in train_crew.agents:
-            if training_data.get(str(agent.id)):
-                result = TaskEvaluator(agent).evaluate_training_data(
-                    training_data=training_data, agent_id=str(agent.id)
-                )
-
-                CrewTrainingHandler(filename).save_trained_data(
-                    agent_id=str(agent.role), trained_data=result.model_dump()
-                )
+            for agent in train_crew.agents:
+                if training_data.get(str(agent.id)):
+                    result = TaskEvaluator(agent).evaluate_training_data(
+                        training_data=training_data, agent_id=str(agent.id)
+                    )
+                    CrewTrainingHandler(filename).save_trained_data(
+                        agent_id=str(agent.role), trained_data=result.model_dump()
+                    )
+        except Exception as e:
+            self._logger.log("error", f"Training failed: {e}", color="red")
+            CrewTrainingHandler(TRAINING_DATA_FILE).clear()
+            CrewTrainingHandler(filename).clear()
+            raise

     def kickoff(
         self,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> CrewOutput:
         for before_callback in self.before_kickoff_callbacks:
+            if inputs is None:
+                inputs = {}
             inputs = before_callback(inputs)

         """Starts the crew to work on its assigned tasks."""
@@ -671,11 +696,7 @@ class Crew(BaseModel):
                 manager.tools = []
                 raise Exception("Manager agent should not have tools")
         else:
-            self.manager_llm = (
-                getattr(self.manager_llm, "model_name", None)
-                or getattr(self.manager_llm, "deployment_name", None)
-                or self.manager_llm
-            )
+            self.manager_llm = create_llm(self.manager_llm)
             manager = Agent(
                 role=i18n.retrieve("hierarchical_manager_agent", "role"),
                 goal=i18n.retrieve("hierarchical_manager_agent", "goal"),
@@ -735,6 +756,7 @@ class Crew(BaseModel):
                 task, task_outputs, futures, task_index, was_replayed
             )
             if skipped_task_output:
+                task_outputs.append(skipped_task_output)
                 continue
 
             if task.async_execution:
@@ -758,7 +780,7 @@ class Crew(BaseModel):
                     context=context,
                     tools=tools_for_task,
                 )
-                task_outputs = [task_output]
+                task_outputs.append(task_output)
                 self._process_task_result(task, task_output)
                 self._store_execution_log(task, task_output, task_index, was_replayed)
 
@@ -779,7 +801,7 @@ class Crew(BaseModel):
             task_outputs = self._process_async_tasks(futures, was_replayed)
             futures.clear()
 
-        previous_output = task_outputs[task_index - 1] if task_outputs else None
+        previous_output = task_outputs[-1] if task_outputs else None
         if previous_output is not None and not task.should_execute(previous_output):
             self._logger.log(
                 "debug",
@@ -901,11 +923,15 @@ class Crew(BaseModel):
         )
 
     def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
-        if len(task_outputs) != 1:
-            raise ValueError(
-                "Something went wrong. Kickoff should return only one task output."
-            )
-        final_task_output = task_outputs[0]
+        if not task_outputs:
+            raise ValueError("No task outputs available to create crew output.")
+        # Filter out empty outputs and get the last valid one as the main output
+        valid_outputs = [t for t in task_outputs if t.raw]
+        if not valid_outputs:
+            raise ValueError("No valid task outputs available to create crew output.")
+        final_task_output = valid_outputs[-1]
 
         final_string_output = final_task_output.raw
         self._finish_execution(final_string_output)
         token_usage = self.calculate_usage_metrics()
@@ -914,7 +940,7 @@ class Crew(BaseModel):
             raw=final_task_output.raw,
             pydantic=final_task_output.pydantic,
             json_dict=final_task_output.json_dict,
-            tasks_output=[task.output for task in self.tasks if task.output],
+            tasks_output=task_outputs,
             token_usage=token_usage,
         )
 
@@ -987,10 +1013,35 @@ class Crew(BaseModel):
         return result
 
     def query_knowledge(self, query: List[str]) -> Union[List[Dict[str, Any]], None]:
-        if self._knowledge:
-            return self._knowledge.query(query)
+        if self.knowledge:
+            return self.knowledge.query(query)
         return None
 
+    def fetch_inputs(self) -> Set[str]:
+        """
+        Gathers placeholders (e.g., {something}) referenced in tasks or agents.
+        Scans each task's 'description' + 'expected_output', and each agent's
+        'role', 'goal', and 'backstory'.
+
+        Returns a set of all discovered placeholder names.
+        """
+        placeholder_pattern = re.compile(r"\{(.+?)\}")
+        required_inputs: Set[str] = set()
+
+        # Scan tasks for inputs
+        for task in self.tasks:
+            # description and expected_output might contain e.g. {topic}, {user_name}, etc.
+            text = f"{task.description or ''} {task.expected_output or ''}"
+            required_inputs.update(placeholder_pattern.findall(text))
+
+        # Scan agents for inputs
+        for agent in self.agents:
+            # role, goal, backstory might have placeholders like {role_detail}, etc.
+            text = f"{agent.role or ''} {agent.goal or ''} {agent.backstory or ''}"
+            required_inputs.update(placeholder_pattern.findall(text))
+
+        return required_inputs
+
     def copy(self):
         """Create a deep copy of the Crew."""
 
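For context, here is a minimal sketch of how the new `fetch_inputs` helper could be used to validate caller-supplied inputs before `kickoff`. The agent and task definitions below are illustrative only, not part of this diff:

```python
from crewai import Agent, Crew, Task

# Hypothetical templates referencing {topic} and {audience} placeholders.
researcher = Agent(
    role="{topic} researcher",
    goal="Summarize {topic} for {audience}",
    backstory="An analyst.",
)
brief = Task(
    description="Write a short brief on {topic}",
    expected_output="Three bullet points",
    agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[brief])

required = crew.fetch_inputs()   # -> {"topic", "audience"}
missing = required - {"topic"}   # check caller-supplied keys up front
if missing:
    raise ValueError(f"Missing inputs: {missing}")
```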
@@ -1007,6 +1058,8 @@ class Crew(BaseModel):
             "_telemetry",
             "agents",
             "tasks",
+            "knowledge_sources",
+            "knowledge",
         }
 
         cloned_agents = [agent.copy() for agent in self.agents]
@@ -1014,6 +1067,9 @@ class Crew(BaseModel):
         task_mapping = {}
 
         cloned_tasks = []
+        existing_knowledge_sources = shallow_copy(self.knowledge_sources)
+        existing_knowledge = shallow_copy(self.knowledge)
+
         for task in self.tasks:
             cloned_task = task.copy(cloned_agents, task_mapping)
             cloned_tasks.append(cloned_task)
@@ -1033,7 +1089,13 @@ class Crew(BaseModel):
         copied_data.pop("agents", None)
         copied_data.pop("tasks", None)
 
-        copied_crew = Crew(**copied_data, agents=cloned_agents, tasks=cloned_tasks)
+        copied_crew = Crew(
+            **copied_data,
+            agents=cloned_agents,
+            tasks=cloned_tasks,
+            knowledge_sources=existing_knowledge_sources,
+            knowledge=existing_knowledge,
+        )
 
         return copied_crew
 
@@ -1046,7 +1108,7 @@ class Crew(BaseModel):
     def _interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
         """Interpolates the inputs in the tasks and agents."""
         [
-            task.interpolate_inputs(
+            task.interpolate_inputs_and_add_conversation_history(
                 # type: ignore # "interpolate_inputs" of "Task" does not return a value (it only ever returns None)
                 inputs
            )
@@ -1105,3 +1167,80 @@ class Crew(BaseModel):
 
     def __repr__(self):
         return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
+
+    def reset_memories(self, command_type: str) -> None:
+        """Reset specific or all memories for the crew.
+
+        Args:
+            command_type: Type of memory to reset.
+                Valid options: 'long', 'short', 'entity', 'knowledge',
+                'kickoff_outputs', or 'all'
+
+        Raises:
+            ValueError: If an invalid command type is provided.
+            RuntimeError: If memory reset operation fails.
+        """
+        VALID_TYPES = frozenset(
+            ["long", "short", "entity", "knowledge", "kickoff_outputs", "all"]
+        )
+
+        if command_type not in VALID_TYPES:
+            raise ValueError(
+                f"Invalid command type. Must be one of: {', '.join(sorted(VALID_TYPES))}"
+            )
+
+        try:
+            if command_type == "all":
+                self._reset_all_memories()
+            else:
+                self._reset_specific_memory(command_type)
+
+            self._logger.log("info", f"{command_type} memory has been reset")
+
+        except Exception as e:
+            error_msg = f"Failed to reset {command_type} memory: {str(e)}"
+            self._logger.log("error", error_msg)
+            raise RuntimeError(error_msg) from e
+
+    def _reset_all_memories(self) -> None:
+        """Reset all available memory systems."""
+        memory_systems = [
+            ("short term", self._short_term_memory),
+            ("entity", self._entity_memory),
+            ("long term", self._long_term_memory),
+            ("task output", self._task_output_handler),
+            ("knowledge", self.knowledge),
+        ]
+
+        for name, system in memory_systems:
+            if system is not None:
+                try:
+                    system.reset()
+                except Exception as e:
+                    raise RuntimeError(f"Failed to reset {name} memory") from e
+
+    def _reset_specific_memory(self, memory_type: str) -> None:
+        """Reset a specific memory system.
+
+        Args:
+            memory_type: Type of memory to reset
+
+        Raises:
+            RuntimeError: If the specified memory system fails to reset
+        """
+        reset_functions = {
+            "long": (self._long_term_memory, "long term"),
+            "short": (self._short_term_memory, "short term"),
+            "entity": (self._entity_memory, "entity"),
+            "knowledge": (self.knowledge, "knowledge"),
+            "kickoff_outputs": (self._task_output_handler, "task output"),
+        }
+
+        memory_system, name = reset_functions[memory_type]
+        if memory_system is None:
+            raise RuntimeError(f"{name} memory system is not initialized")
+
+        try:
+            memory_system.reset()
+        except Exception as e:
+            raise RuntimeError(f"Failed to reset {name} memory") from e
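A short sketch of the new memory-reset API, assuming `crew` is a configured `Crew` with memory enabled:

```python
crew.reset_memories("short")   # clear short-term memory only
crew.reset_memories("all")     # clear every initialized memory system

try:
    crew.reset_memories("episodic")  # not in VALID_TYPES
except ValueError as exc:
    print(exc)  # Invalid command type. Must be one of: all, entity, ...
```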
@@ -1,7 +1,9 @@
 import json
-from typing import Any, Dict, Optional
+from typing import Any, Callable, Dict, Optional
 
 from pydantic import BaseModel, Field
+from pydantic.main import IncEx
+from typing_extensions import Literal
 
 from crewai.tasks.output_format import OutputFormat
 from crewai.tasks.task_output import TaskOutput
@@ -21,16 +23,45 @@ class CrewOutput(BaseModel):
     tasks_output: list[TaskOutput] = Field(
         description="Output of each task", default=[]
     )
-    token_usage: UsageMetrics = Field(description="Processed token summary", default={})
+    token_usage: UsageMetrics = Field(description="Processed token summary", default_factory=UsageMetrics)
 
-    @property
-    def json(self) -> Optional[str]:
-        if self.tasks_output[-1].output_format != OutputFormat.JSON:
+    def model_json(self) -> str:
+        """Get the JSON representation of the output."""
+        if self.tasks_output and self.tasks_output[-1].output_format != OutputFormat.JSON:
             raise ValueError(
                 "No JSON output found in the final task. Please make sure to set the output_json property in the final task in your crew."
             )
-        return json.dumps(self.json_dict)
+        return json.dumps(self.json_dict) if self.json_dict else "{}"
+
+    def model_dump_json(
+        self,
+        *,
+        indent: Optional[int] = None,
+        include: Optional[IncEx] = None,
+        exclude: Optional[IncEx] = None,
+        context: Optional[Any] = None,
+        by_alias: bool = False,
+        exclude_unset: bool = False,
+        exclude_defaults: bool = False,
+        exclude_none: bool = False,
+        round_trip: bool = False,
+        warnings: bool | Literal["none", "warn", "error"] = False,
+        serialize_as_any: bool = False,
+    ) -> str:
+        """Override model_dump_json to handle custom JSON output."""
+        return super().model_dump_json(
+            indent=indent,
+            include=include,
+            exclude=exclude,
+            context=context,
+            by_alias=by_alias,
+            exclude_unset=exclude_unset,
+            exclude_defaults=exclude_defaults,
+            exclude_none=exclude_none,
+            round_trip=round_trip,
+            warnings=warnings,
+            serialize_as_any=serialize_as_any,
+        )
 
     def to_dict(self) -> Dict[str, Any]:
         """Convert json_output and pydantic_output to a dictionary."""
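The `json` property becomes a `model_json()` method here, sidestepping the clash with Pydantic's own `json`/`model_dump_json` names. A hedged usage sketch (it assumes the final task was configured with `output_json`; otherwise `model_json()` raises):

```python
result = crew.kickoff(inputs={"topic": "AI"})

print(result.model_json())               # JSON string from the final task
print(result.model_dump_json(indent=2))  # full CrewOutput, warnings suppressed
```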
@@ -1,3 +1,5 @@
-from crewai.flow.flow import Flow
+from crewai.flow.flow import Flow, start, listen, or_, and_, router
+from crewai.flow.persistence import persist
 
-__all__ = ["Flow"]
+__all__ = ["Flow", "start", "listen", "or_", "and_", "router", "persist"]
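With these re-exports, the flow decorators can be imported straight from `crewai.flow`. A minimal sketch:

```python
from crewai.flow import Flow, listen, start

class PipelineFlow(Flow):
    @start()
    def fetch(self):
        return "raw data"

    @listen(fetch)
    def clean(self, data):
        return data.upper()

print(PipelineFlow().kickoff())  # -> "RAW DATA"
```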
@@ -1,5 +1,6 @@
 import asyncio
 import inspect
+import logging
 from typing import (
     Any,
     Callable,
@@ -13,9 +14,10 @@ from typing import (
     Union,
     cast,
 )
+from uuid import uuid4
 
 from blinker import Signal
-from pydantic import BaseModel, ValidationError
+from pydantic import BaseModel, Field, ValidationError
 
 from crewai.flow.flow_events import (
     FlowFinishedEvent,
@@ -24,10 +26,70 @@ from crewai.flow.flow_events import (
     MethodExecutionStartedEvent,
 )
 from crewai.flow.flow_visualizer import plot_flow
+from crewai.flow.persistence.base import FlowPersistence
 from crewai.flow.utils import get_possible_return_constants
 from crewai.telemetry import Telemetry
+from crewai.utilities.printer import Printer
 
-T = TypeVar("T", bound=Union[BaseModel, Dict[str, Any]])
+logger = logging.getLogger(__name__)
+
+
+class FlowState(BaseModel):
+    """Base model for all flow states, ensuring each state has a unique ID."""
+
+    id: str = Field(
+        default_factory=lambda: str(uuid4()),
+        description="Unique identifier for the flow state",
+    )
+
+
+# Type variables with explicit bounds
+T = TypeVar(
+    "T", bound=Union[Dict[str, Any], BaseModel]
+)  # Generic flow state type parameter
+StateT = TypeVar(
+    "StateT", bound=Union[Dict[str, Any], BaseModel]
+)  # State validation type parameter
+
+
+def ensure_state_type(state: Any, expected_type: Type[StateT]) -> StateT:
+    """Ensure state matches expected type with proper validation.
+
+    Args:
+        state: State instance to validate
+        expected_type: Expected type for the state
+
+    Returns:
+        Validated state instance
+
+    Raises:
+        TypeError: If state doesn't match expected type
+        ValueError: If state validation fails
+    """
+    if expected_type is dict:
+        if not isinstance(state, dict):
+            raise TypeError(f"Expected dict, got {type(state).__name__}")
+        return cast(StateT, state)
+    if isinstance(expected_type, type) and issubclass(expected_type, BaseModel):
+        if not isinstance(state, expected_type):
+            raise TypeError(
+                f"Expected {expected_type.__name__}, got {type(state).__name__}"
+            )
+        return cast(StateT, state)
+    raise TypeError(f"Invalid expected_type: {expected_type}")
 
 
 def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
@@ -71,6 +133,7 @@ def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
         >>> def complex_start(self):
         ...     pass
     """
+
     def decorator(func):
         func.__is_start_method__ = True
         if condition is not None:
@@ -95,6 +158,7 @@ def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
 
     return decorator
 
+
 def listen(condition: Union[str, dict, Callable]) -> Callable:
     """
     Creates a listener that executes when specified conditions are met.
@@ -131,6 +195,7 @@ def listen(condition: Union[str, dict, Callable]) -> Callable:
         >>> def handle_completion(self):
         ...     pass
     """
+
     def decorator(func):
         if isinstance(condition, str):
             func.__trigger_methods__ = [condition]
@@ -195,6 +260,7 @@ def router(condition: Union[str, dict, Callable]) -> Callable:
         ...         return CONTINUE
         ...     return STOP
     """
+
     def decorator(func):
         func.__is_router__ = True
         if isinstance(condition, str):
@@ -218,6 +284,7 @@ def router(condition: Union[str, dict, Callable]) -> Callable:
 
     return decorator
 
+
 def or_(*conditions: Union[str, dict, Callable]) -> dict:
     """
     Combines multiple conditions with OR logic for flow control.
@@ -320,21 +387,32 @@ class FlowMeta(type):
         routers = set()
 
         for attr_name, attr_value in dct.items():
-            if hasattr(attr_value, "__is_start_method__"):
-                start_methods.append(attr_name)
-            if hasattr(attr_value, "__trigger_methods__"):
-                methods = attr_value.__trigger_methods__
-                condition_type = getattr(attr_value, "__condition_type__", "OR")
-                listeners[attr_name] = (condition_type, methods)
-            elif hasattr(attr_value, "__trigger_methods__"):
-                methods = attr_value.__trigger_methods__
-                condition_type = getattr(attr_value, "__condition_type__", "OR")
-                listeners[attr_name] = (condition_type, methods)
-            if hasattr(attr_value, "__is_router__") and attr_value.__is_router__:
-                routers.add(attr_name)
-                possible_returns = get_possible_return_constants(attr_value)
-                if possible_returns:
-                    router_paths[attr_name] = possible_returns
+            # Check for any flow-related attributes
+            if (
+                hasattr(attr_value, "__is_flow_method__")
+                or hasattr(attr_value, "__is_start_method__")
+                or hasattr(attr_value, "__trigger_methods__")
+                or hasattr(attr_value, "__is_router__")
+            ):
+                # Register start methods
+                if hasattr(attr_value, "__is_start_method__"):
+                    start_methods.append(attr_name)
+
+                # Register listeners and routers
+                if hasattr(attr_value, "__trigger_methods__"):
+                    methods = attr_value.__trigger_methods__
+                    condition_type = getattr(attr_value, "__condition_type__", "OR")
+                    listeners[attr_name] = (condition_type, methods)
+
+                if (
+                    hasattr(attr_value, "__is_router__")
+                    and attr_value.__is_router__
+                ):
+                    routers.add(attr_name)
+                    possible_returns = get_possible_return_constants(attr_value)
+                    if possible_returns:
+                        router_paths[attr_name] = possible_returns
 
         setattr(cls, "_start_methods", start_methods)
         setattr(cls, "_listeners", listeners)
@@ -345,7 +423,12 @@ class FlowMeta(type):
 
 
 class Flow(Generic[T], metaclass=FlowMeta):
+    """Base class for all flows.
+
+    Type parameter T must be either Dict[str, Any] or a subclass of BaseModel."""
+
     _telemetry = Telemetry()
+    _printer = Printer()
 
     _start_methods: List[str] = []
     _listeners: Dict[str, tuple[str, List[str]]] = {}
@@ -361,30 +444,130 @@ class Flow(Generic[T], metaclass=FlowMeta):
         _FlowGeneric.__name__ = f"{cls.__name__}[{item.__name__}]"
         return _FlowGeneric
 
-    def __init__(self) -> None:
+    def __init__(
+        self,
+        persistence: Optional[FlowPersistence] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Initialize a new Flow instance.
+
+        Args:
+            persistence: Optional persistence backend for storing flow states
+            **kwargs: Additional state values to initialize or override
+        """
+        # Initialize basic instance attributes
         self._methods: Dict[str, Callable] = {}
-        self._state: T = self._create_initial_state()
         self._method_execution_counts: Dict[str, int] = {}
         self._pending_and_listeners: Dict[str, Set[str]] = {}
         self._method_outputs: List[Any] = []  # List to store all method outputs
+        self._persistence: Optional[FlowPersistence] = persistence
+
+        # Initialize state with initial values
+        self._state = self._create_initial_state()
+
+        # Apply any additional kwargs
+        if kwargs:
+            self._initialize_state(kwargs)
 
         self._telemetry.flow_creation_span(self.__class__.__name__)
 
+        # Register all flow-related methods
         for method_name in dir(self):
-            if callable(getattr(self, method_name)) and not method_name.startswith(
-                "__"
-            ):
-                self._methods[method_name] = getattr(self, method_name)
+            if not method_name.startswith("_"):
+                method = getattr(self, method_name)
+                # Check for any flow-related attributes
+                if (
+                    hasattr(method, "__is_flow_method__")
+                    or hasattr(method, "__is_start_method__")
+                    or hasattr(method, "__trigger_methods__")
+                    or hasattr(method, "__is_router__")
+                ):
+                    # Ensure method is bound to this instance
+                    if not hasattr(method, "__self__"):
+                        method = method.__get__(self, self.__class__)
+                    self._methods[method_name] = method
 
     def _create_initial_state(self) -> T:
+        """Create and initialize flow state with UUID and default values.
+
+        Returns:
+            New state instance with UUID and default values initialized
+
+        Raises:
+            ValueError: If structured state model lacks 'id' field
+            TypeError: If state is neither BaseModel nor dictionary
+        """
+        # Handle case where initial_state is None but we have a type parameter
         if self.initial_state is None and hasattr(self, "_initial_state_T"):
-            return self._initial_state_T()  # type: ignore
+            state_type = getattr(self, "_initial_state_T")
+            if isinstance(state_type, type):
+                if issubclass(state_type, FlowState):
+                    # Create instance without id, then set it
+                    instance = state_type()
+                    if not hasattr(instance, "id"):
+                        setattr(instance, "id", str(uuid4()))
+                    return cast(T, instance)
+                elif issubclass(state_type, BaseModel):
+                    # Create a new type that includes the ID field
+                    class StateWithId(state_type, FlowState):  # type: ignore
+                        pass
+
+                    instance = StateWithId()
+                    if not hasattr(instance, "id"):
+                        setattr(instance, "id", str(uuid4()))
+                    return cast(T, instance)
+                elif state_type is dict:
+                    return cast(T, {"id": str(uuid4())})
+
+        # Handle case where no initial state is provided
         if self.initial_state is None:
-            return {}  # type: ignore
-        elif isinstance(self.initial_state, type):
-            return self.initial_state()
-        else:
-            return self.initial_state
+            return cast(T, {"id": str(uuid4())})
+
+        # Handle case where initial_state is a type (class)
+        if isinstance(self.initial_state, type):
+            if issubclass(self.initial_state, FlowState):
+                return cast(T, self.initial_state())  # Uses model defaults
+            elif issubclass(self.initial_state, BaseModel):
+                # Validate that the model has an id field
+                model_fields = getattr(self.initial_state, "model_fields", None)
+                if not model_fields or "id" not in model_fields:
+                    raise ValueError("Flow state model must have an 'id' field")
+                return cast(T, self.initial_state())  # Uses model defaults
+            elif self.initial_state is dict:
+                return cast(T, {"id": str(uuid4())})
+
+        # Handle dictionary instance case
+        if isinstance(self.initial_state, dict):
+            new_state = dict(self.initial_state)  # Copy to avoid mutations
+            if "id" not in new_state:
+                new_state["id"] = str(uuid4())
+            return cast(T, new_state)
+
+        # Handle BaseModel instance case
+        if isinstance(self.initial_state, BaseModel):
+            model = cast(BaseModel, self.initial_state)
+            if not hasattr(model, "id"):
+                raise ValueError("Flow state model must have an 'id' field")
+
+            # Create new instance with same values to avoid mutations
+            if hasattr(model, "model_dump"):
+                # Pydantic v2
+                state_dict = model.model_dump()
+            elif hasattr(model, "dict"):
+                # Pydantic v1
+                state_dict = model.dict()
+            else:
+                # Fallback for other BaseModel implementations
+                state_dict = {
+                    k: v for k, v in model.__dict__.items() if not k.startswith("_")
+                }
+
+            # Create new instance of the same class
+            model_class = type(model)
+            return cast(T, model_class(**state_dict))
+
+        raise TypeError(
+            f"Initial state must be dict or BaseModel, got {type(self.initial_state)}"
+        )
 
     @property
     def state(self) -> T:
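The practical consequence of the new `_create_initial_state` logic is that every state carries a UUID `id`, and subclassing the new `FlowState` is the most direct way to get one. A minimal sketch:

```python
from crewai.flow.flow import Flow, FlowState, start

class CounterState(FlowState):
    # 'id' is inherited from FlowState with a uuid4 default
    count: int = 0

class CounterFlow(Flow[CounterState]):
    @start()
    def bump(self):
        self.state.count += 1

flow = CounterFlow()
flow.kickoff()
print(flow.state.id, flow.state.count)  # auto-generated UUID, 1
```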
@@ -395,34 +578,163 @@ class Flow(Generic[T], metaclass=FlowMeta):
         """Returns the list of all outputs from executed methods."""
         return self._method_outputs
 
+    @property
+    def flow_id(self) -> str:
+        """Returns the unique identifier of this flow instance.
+
+        This property provides a consistent way to access the flow's unique identifier
+        regardless of the underlying state implementation (dict or BaseModel).
+
+        Returns:
+            str: The flow's unique identifier, or an empty string if not found
+
+        Note:
+            This property safely handles both dictionary and BaseModel state types,
+            returning an empty string if the ID cannot be retrieved rather than raising
+            an exception.
+
+        Example:
+            ```python
+            flow = MyFlow()
+            print(f"Current flow ID: {flow.flow_id}")  # Safely get flow ID
+            ```
+        """
+        try:
+            if not hasattr(self, "_state"):
+                return ""
+
+            if isinstance(self._state, dict):
+                return str(self._state.get("id", ""))
+            elif isinstance(self._state, BaseModel):
+                return str(getattr(self._state, "id", ""))
+            return ""
+        except (AttributeError, TypeError):
+            return ""  # Safely handle any unexpected attribute access issues
+
     def _initialize_state(self, inputs: Dict[str, Any]) -> None:
-        if isinstance(self._state, BaseModel):
-            # Structured state
+        """Initialize or update flow state with new inputs.
+
+        Args:
+            inputs: Dictionary of state values to set/update
+
+        Raises:
+            ValueError: If validation fails for structured state
+            TypeError: If state is neither BaseModel nor dictionary
+        """
+        if isinstance(self._state, dict):
+            # For dict states, preserve existing fields unless overridden
+            current_id = self._state.get("id")
+            # Only update specified fields
+            for k, v in inputs.items():
+                self._state[k] = v
+            # Ensure ID is preserved or generated
+            if current_id:
+                self._state["id"] = current_id
+            elif "id" not in self._state:
+                self._state["id"] = str(uuid4())
+        elif isinstance(self._state, BaseModel):
+            # For BaseModel states, preserve existing fields unless overridden
             try:
+                model = cast(BaseModel, self._state)
+                # Get current state as dict
+                if hasattr(model, "model_dump"):
+                    current_state = model.model_dump()
+                elif hasattr(model, "dict"):
+                    current_state = model.dict()
+                else:
+                    current_state = {
+                        k: v for k, v in model.__dict__.items() if not k.startswith("_")
+                    }
 
-                def create_model_with_extra_forbid(
-                    base_model: Type[BaseModel],
-                ) -> Type[BaseModel]:
-                    class ModelWithExtraForbid(base_model):  # type: ignore
-                        model_config = base_model.model_config.copy()
-                        model_config["extra"] = "forbid"
-
-                    return ModelWithExtraForbid
-
-                ModelWithExtraForbid = create_model_with_extra_forbid(
-                    self._state.__class__
-                )
-                self._state = cast(
-                    T, ModelWithExtraForbid(**{**self._state.model_dump(), **inputs})
-                )
+                # Create new state with preserved fields and updates
+                new_state = {**current_state, **inputs}
+
+                # Create new instance with merged state
+                model_class = type(model)
+                if hasattr(model_class, "model_validate"):
+                    # Pydantic v2
+                    self._state = cast(T, model_class.model_validate(new_state))
+                elif hasattr(model_class, "parse_obj"):
+                    # Pydantic v1
+                    self._state = cast(T, model_class.parse_obj(new_state))
+                else:
+                    # Fallback for other BaseModel implementations
+                    self._state = cast(T, model_class(**new_state))
             except ValidationError as e:
                 raise ValueError(f"Invalid inputs for structured state: {e}") from e
-        elif isinstance(self._state, dict):
-            self._state.update(inputs)
         else:
             raise TypeError("State must be a BaseModel instance or a dictionary.")
 
+    def _restore_state(self, stored_state: Dict[str, Any]) -> None:
+        """Restore flow state from persistence.
+
+        Args:
+            stored_state: Previously stored state to restore
+
+        Raises:
+            ValueError: If validation fails for structured state
+            TypeError: If state is neither BaseModel nor dictionary
+        """
+        # When restoring from persistence, use the stored ID
+        stored_id = stored_state.get("id")
+        if not stored_id:
+            raise ValueError("Stored state must have an 'id' field")
+
+        if isinstance(self._state, dict):
+            # For dict states, update all fields from stored state
+            self._state.clear()
+            self._state.update(stored_state)
+        elif isinstance(self._state, BaseModel):
+            # For BaseModel states, create new instance with stored values
+            model = cast(BaseModel, self._state)
+            if hasattr(model, "model_validate"):
+                # Pydantic v2
+                self._state = cast(T, type(model).model_validate(stored_state))
+            elif hasattr(model, "parse_obj"):
+                # Pydantic v1
+                self._state = cast(T, type(model).parse_obj(stored_state))
+            else:
+                # Fallback for other BaseModel implementations
+                self._state = cast(T, type(model)(**stored_state))
+        else:
+            raise TypeError(f"State must be dict or BaseModel, got {type(self._state)}")
+
     def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
+        """Start the flow execution.
+
+        Args:
+            inputs: Optional dictionary containing input values and potentially a state ID to restore
+        """
+        # Handle state restoration if ID is provided in inputs
+        if inputs and "id" in inputs and self._persistence is not None:
+            restore_uuid = inputs["id"]
+            stored_state = self._persistence.load_state(restore_uuid)
+
+            # Override the id in the state if it exists in inputs
+            if "id" in inputs:
+                if isinstance(self._state, dict):
+                    self._state["id"] = inputs["id"]
+                elif isinstance(self._state, BaseModel):
+                    setattr(self._state, "id", inputs["id"])
+
+            if stored_state:
+                self._log_flow_event(
+                    f"Loading flow state from memory for UUID: {restore_uuid}",
+                    color="yellow",
+                )
+                # Restore the state
+                self._restore_state(stored_state)
+            else:
+                self._log_flow_event(
+                    f"No flow state found for UUID: {restore_uuid}", color="red"
+                )
+
+            # Apply any additional inputs after restoration
+            filtered_inputs = {k: v for k, v in inputs.items() if k != "id"}
+            if filtered_inputs:
+                self._initialize_state(filtered_inputs)
+
+        # Start flow execution
         self.event_emitter.send(
             self,
             event=FlowStartedEvent(
@@ -430,9 +742,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
                 flow_name=self.__class__.__name__,
             ),
         )
+        self._log_flow_event(
+            f"Flow started with ID: {self.flow_id}", color="bold_magenta"
+        )
 
-        if inputs is not None:
+        if inputs is not None and "id" not in inputs:
             self._initialize_state(inputs)
 
         return asyncio.run(self.kickoff_async())
 
     async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
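With a persistence backend attached, `kickoff` can now resume a previous run: an `id` key in `inputs` reloads the stored state before execution, and remaining keys are applied on top. A hedged sketch that assumes `@persist` wraps the methods so states actually get saved (see the decorators module below):

```python
from crewai.flow.flow import Flow, FlowState, start
from crewai.flow.persistence import SQLiteFlowPersistence, persist

class RunState(FlowState):
    step: str = "pending"

@persist()  # class-level: saves state after each flow method completes
class ResumableFlow(Flow[RunState]):
    @start()
    def begin(self):
        self.state.step = "done"

first = ResumableFlow(persistence=SQLiteFlowPersistence())
first.kickoff()
saved_id = first.flow_id

# Later: reload the stored state for that UUID instead of starting fresh.
second = ResumableFlow(persistence=SQLiteFlowPersistence())
second.kickoff(inputs={"id": saved_id})
```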
@@ -675,6 +991,32 @@ class Flow(Generic[T], metaclass=FlowMeta):
 
             traceback.print_exc()
 
+    def _log_flow_event(
+        self, message: str, color: str = "yellow", level: str = "info"
+    ) -> None:
+        """Centralized logging method for flow events.
+
+        This method provides a consistent interface for logging flow-related events,
+        combining colored console output with proper logging levels.
+
+        Args:
+            message: The message to log
+            color: Color to use for console output (default: yellow)
+                Available colors: purple, red, bold_green, bold_purple,
+                bold_blue, yellow
+            level: Log level to use (default: info)
+                Supported levels: info, warning
+
+        Note:
+            This method uses the Printer utility for colored console output
+            and the standard logging module for log level support.
+        """
+        self._printer.print(message, color=color)
+        if level == "info":
+            logger.info(message)
+        elif level == "warning":
+            logger.warning(message)
+
     def plot(self, filename: str = "crewai_flow") -> None:
         self._telemetry.flow_plotting_span(
             self.__class__.__name__, list(self._methods.keys())
src/crewai/flow/persistence/__init__.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+"""
+CrewAI Flow Persistence.
+
+This module provides interfaces and implementations for persisting flow states.
+"""
+
+from typing import Any, Dict, TypeVar, Union
+
+from pydantic import BaseModel
+
+from crewai.flow.persistence.base import FlowPersistence
+from crewai.flow.persistence.decorators import persist
+from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
+
+__all__ = ["FlowPersistence", "persist", "SQLiteFlowPersistence"]
+
+StateType = TypeVar('StateType', bound=Union[Dict[str, Any], BaseModel])
+DictStateType = Dict[str, Any]
src/crewai/flow/persistence/base.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+"""Base class for flow state persistence."""
+
+import abc
+from typing import Any, Dict, Optional, Union
+
+from pydantic import BaseModel
+
+
+class FlowPersistence(abc.ABC):
+    """Abstract base class for flow state persistence.
+
+    This class defines the interface that all persistence implementations must follow.
+    It supports both structured (Pydantic BaseModel) and unstructured (dict) states.
+    """
+
+    @abc.abstractmethod
+    def init_db(self) -> None:
+        """Initialize the persistence backend.
+
+        This method should handle any necessary setup, such as:
+        - Creating tables
+        - Establishing connections
+        - Setting up indexes
+        """
+        pass
+
+    @abc.abstractmethod
+    def save_state(
+        self,
+        flow_uuid: str,
+        method_name: str,
+        state_data: Union[Dict[str, Any], BaseModel]
+    ) -> None:
+        """Persist the flow state after method completion.
+
+        Args:
+            flow_uuid: Unique identifier for the flow instance
+            method_name: Name of the method that just completed
+            state_data: Current state data (either dict or Pydantic model)
+        """
+        pass
+
+    @abc.abstractmethod
+    def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
+        """Load the most recent state for a given flow UUID.
+
+        Args:
+            flow_uuid: Unique identifier for the flow instance
+
+        Returns:
+            The most recent state as a dictionary, or None if no state exists
+        """
+        pass
src/crewai/flow/persistence/decorators.py (new file, 252 lines)
@@ -0,0 +1,252 @@
+"""
+Decorators for flow state persistence.
+
+Example:
+    ```python
+    from crewai.flow.flow import Flow, start
+    from crewai.flow.persistence import persist, SQLiteFlowPersistence
+
+    class MyFlow(Flow):
+        @start()
+        @persist(SQLiteFlowPersistence())
+        def sync_method(self):
+            # Synchronous method implementation
+            pass
+
+        @start()
+        @persist(SQLiteFlowPersistence())
+        async def async_method(self):
+            # Asynchronous method implementation
+            await some_async_operation()
+    ```
+"""
+
+import asyncio
+import functools
+import logging
+from typing import (
+    Any,
+    Callable,
+    Optional,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+)
+
+from pydantic import BaseModel
+
+from crewai.flow.persistence.base import FlowPersistence
+from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
+from crewai.utilities.printer import Printer
+
+logger = logging.getLogger(__name__)
+T = TypeVar("T")
+
+# Constants for log messages
+LOG_MESSAGES = {
+    "save_state": "Saving flow state to memory for ID: {}",
+    "save_error": "Failed to persist state for method {}: {}",
+    "state_missing": "Flow instance has no state",
+    "id_missing": "Flow state must have an 'id' field for persistence"
+}
+
+
+class PersistenceDecorator:
+    """Class to handle flow state persistence with consistent logging."""
+
+    _printer = Printer()  # Class-level printer instance
+
+    @classmethod
+    def persist_state(cls, flow_instance: Any, method_name: str, persistence_instance: FlowPersistence) -> None:
+        """Persist flow state with proper error handling and logging.
+
+        This method handles the persistence of flow state data, including proper
+        error handling and colored console output for status updates.
+
+        Args:
+            flow_instance: The flow instance whose state to persist
+            method_name: Name of the method that triggered persistence
+            persistence_instance: The persistence backend to use
+
+        Raises:
+            ValueError: If flow has no state or state lacks an ID
+            RuntimeError: If state persistence fails
+            AttributeError: If flow instance lacks required state attributes
+        """
+        try:
+            state = getattr(flow_instance, 'state', None)
+            if state is None:
+                raise ValueError("Flow instance has no state")
+
+            flow_uuid: Optional[str] = None
+            if isinstance(state, dict):
+                flow_uuid = state.get('id')
+            elif isinstance(state, BaseModel):
+                flow_uuid = getattr(state, 'id', None)
+
+            if not flow_uuid:
+                raise ValueError("Flow state must have an 'id' field for persistence")
+
+            # Log state saving with consistent message
+            cls._printer.print(LOG_MESSAGES["save_state"].format(flow_uuid), color="cyan")
+            logger.info(LOG_MESSAGES["save_state"].format(flow_uuid))
+
+            try:
+                persistence_instance.save_state(
+                    flow_uuid=flow_uuid,
+                    method_name=method_name,
+                    state_data=state,
+                )
+            except Exception as e:
+                error_msg = LOG_MESSAGES["save_error"].format(method_name, str(e))
+                cls._printer.print(error_msg, color="red")
+                logger.error(error_msg)
+                raise RuntimeError(f"State persistence failed: {str(e)}") from e
+        except AttributeError:
+            error_msg = LOG_MESSAGES["state_missing"]
+            cls._printer.print(error_msg, color="red")
+            logger.error(error_msg)
+            raise ValueError(error_msg)
+        except (TypeError, ValueError) as e:
+            error_msg = LOG_MESSAGES["id_missing"]
+            cls._printer.print(error_msg, color="red")
+            logger.error(error_msg)
+            raise ValueError(error_msg) from e
+
+
+def persist(persistence: Optional[FlowPersistence] = None):
+    """Decorator to persist flow state.
+
+    This decorator can be applied at either the class level or method level.
+    When applied at the class level, it automatically persists all flow method
+    states. When applied at the method level, it persists only that method's
+    state.
+
+    Args:
+        persistence: Optional FlowPersistence implementation to use.
+            If not provided, uses SQLiteFlowPersistence.
+
+    Returns:
+        A decorator that can be applied to either a class or method
+
+    Raises:
+        ValueError: If the flow state doesn't have an 'id' field
+        RuntimeError: If state persistence fails
+
+    Example:
+        @persist  # Class-level persistence with default SQLite
+        class MyFlow(Flow[MyState]):
+            @start()
+            def begin(self):
+                pass
+    """
+    def decorator(target: Union[Type, Callable[..., T]]) -> Union[Type, Callable[..., T]]:
+        """Decorator that handles both class and method decoration."""
+        actual_persistence = persistence or SQLiteFlowPersistence()
+
+        if isinstance(target, type):
+            # Class decoration
+            original_init = getattr(target, "__init__")
+
+            @functools.wraps(original_init)
+            def new_init(self: Any, *args: Any, **kwargs: Any) -> None:
+                if 'persistence' not in kwargs:
+                    kwargs['persistence'] = actual_persistence
+                original_init(self, *args, **kwargs)
+
+            setattr(target, "__init__", new_init)
+
+            # Store original methods to preserve their decorators
+            original_methods = {}
+
+            for name, method in target.__dict__.items():
+                if callable(method) and (
+                    hasattr(method, "__is_start_method__") or
+                    hasattr(method, "__trigger_methods__") or
+                    hasattr(method, "__condition_type__") or
+                    hasattr(method, "__is_flow_method__") or
+                    hasattr(method, "__is_router__")
+                ):
+                    original_methods[name] = method
+
+            # Create wrapped versions of the methods that include persistence
+            for name, method in original_methods.items():
+                if asyncio.iscoroutinefunction(method):
+                    # Create a closure to capture the current name and method
+                    def create_async_wrapper(method_name: str, original_method: Callable):
+                        @functools.wraps(original_method)
+                        async def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
+                            result = await original_method(self, *args, **kwargs)
+                            PersistenceDecorator.persist_state(self, method_name, actual_persistence)
+                            return result
+                        return method_wrapper
+
+                    wrapped = create_async_wrapper(name, method)
+
+                    # Preserve all original decorators and attributes
+                    for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
+                        if hasattr(method, attr):
+                            setattr(wrapped, attr, getattr(method, attr))
+                    setattr(wrapped, "__is_flow_method__", True)
+
+                    # Update the class with the wrapped method
+                    setattr(target, name, wrapped)
+                else:
+                    # Create a closure to capture the current name and method
+                    def create_sync_wrapper(method_name: str, original_method: Callable):
+                        @functools.wraps(original_method)
+                        def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
+                            result = original_method(self, *args, **kwargs)
+                            PersistenceDecorator.persist_state(self, method_name, actual_persistence)
+                            return result
+                        return method_wrapper
+
+                    wrapped = create_sync_wrapper(name, method)
+
+                    # Preserve all original decorators and attributes
+                    for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
+                        if hasattr(method, attr):
+                            setattr(wrapped, attr, getattr(method, attr))
+                    setattr(wrapped, "__is_flow_method__", True)
+
+                    # Update the class with the wrapped method
+                    setattr(target, name, wrapped)
+
+            return target
+        else:
+            # Method decoration
+            method = target
+            setattr(method, "__is_flow_method__", True)
+
+            if asyncio.iscoroutinefunction(method):
+                @functools.wraps(method)
+                async def method_async_wrapper(flow_instance: Any, *args: Any, **kwargs: Any) -> T:
+                    method_coro = method(flow_instance, *args, **kwargs)
+                    if asyncio.iscoroutine(method_coro):
+                        result = await method_coro
+                    else:
+                        result = method_coro
+                    PersistenceDecorator.persist_state(flow_instance, method.__name__, actual_persistence)
+                    return result
+
+                for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
+                    if hasattr(method, attr):
+                        setattr(method_async_wrapper, attr, getattr(method, attr))
+                setattr(method_async_wrapper, "__is_flow_method__", True)
+                return cast(Callable[..., T], method_async_wrapper)
+            else:
+                @functools.wraps(method)
+                def method_sync_wrapper(flow_instance: Any, *args: Any, **kwargs: Any) -> T:
+                    result = method(flow_instance, *args, **kwargs)
+                    PersistenceDecorator.persist_state(flow_instance, method.__name__, actual_persistence)
+                    return result
+
+                for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
+                    if hasattr(method, attr):
+                        setattr(method_sync_wrapper, attr, getattr(method, attr))
+                setattr(method_sync_wrapper, "__is_flow_method__", True)
+                return cast(Callable[..., T], method_sync_wrapper)
+
+    return decorator
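Method-level use of the decorator, per the module docstring above; only the decorated step persists its state. Note that `persist` is a decorator factory, so it must be called (`@persist()` or `@persist(backend)`):

```python
from crewai.flow.flow import Flow, start
from crewai.flow.persistence import SQLiteFlowPersistence, persist

class AuditedFlow(Flow):
    @start()
    @persist(SQLiteFlowPersistence())  # persist only this step's state
    def record(self):
        # Unstructured dict state still gets an auto-generated 'id',
        # which is what persist_state() uses as the flow UUID.
        self.state["note"] = "saved after record() returns"

AuditedFlow().kickoff()
```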
123
src/crewai/flow/persistence/sqlite.py
Normal file
123
src/crewai/flow/persistence/sqlite.py
Normal file
@@ -0,0 +1,123 @@
"""
SQLite-based implementation of flow state persistence.
"""

import json
import sqlite3
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel

from crewai.flow.persistence.base import FlowPersistence


class SQLiteFlowPersistence(FlowPersistence):
    """SQLite-based implementation of flow state persistence.

    This class provides a simple, file-based persistence implementation using SQLite.
    It's suitable for development and testing, or for production use cases with
    moderate performance requirements.
    """

    db_path: str  # Type annotation for instance variable

    def __init__(self, db_path: Optional[str] = None):
        """Initialize SQLite persistence.

        Args:
            db_path: Path to the SQLite database file. If not provided, uses
                db_storage_path() from utilities.paths.

        Raises:
            ValueError: If db_path is invalid
        """
        from crewai.utilities.paths import db_storage_path

        # Get path from argument or default location
        path = db_path or str(Path(db_storage_path()) / "flow_states.db")

        if not path:
            raise ValueError("Database path must be provided")

        self.db_path = path  # Now mypy knows this is str
        self.init_db()

    def init_db(self) -> None:
        """Create the necessary tables if they don't exist."""
        with sqlite3.connect(self.db_path) as conn:
            conn.execute("""
                CREATE TABLE IF NOT EXISTS flow_states (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    flow_uuid TEXT NOT NULL,
                    method_name TEXT NOT NULL,
                    timestamp DATETIME NOT NULL,
                    state_json TEXT NOT NULL
                )
            """)
            # Add index for faster UUID lookups
            conn.execute("""
                CREATE INDEX IF NOT EXISTS idx_flow_states_uuid
                ON flow_states(flow_uuid)
            """)

    def save_state(
        self,
        flow_uuid: str,
        method_name: str,
        state_data: Union[Dict[str, Any], BaseModel],
    ) -> None:
        """Save the current flow state to SQLite.

        Args:
            flow_uuid: Unique identifier for the flow instance
            method_name: Name of the method that just completed
            state_data: Current state data (either dict or Pydantic model)
        """
        # Convert state_data to dict, handling both Pydantic and dict cases
        if isinstance(state_data, BaseModel):
            state_dict = dict(state_data)  # Use dict() for better type compatibility
        elif isinstance(state_data, dict):
            state_dict = state_data
        else:
            raise ValueError(
                f"state_data must be either a Pydantic BaseModel or dict, got {type(state_data)}"
            )

        with sqlite3.connect(self.db_path) as conn:
            conn.execute("""
                INSERT INTO flow_states (
                    flow_uuid,
                    method_name,
                    timestamp,
                    state_json
                ) VALUES (?, ?, ?, ?)
            """, (
                flow_uuid,
                method_name,
                datetime.utcnow().isoformat(),
                json.dumps(state_dict),
            ))

    def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
        """Load the most recent state for a given flow UUID.

        Args:
            flow_uuid: Unique identifier for the flow instance

        Returns:
            The most recent state as a dictionary, or None if no state exists
        """
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.execute("""
                SELECT state_json
                FROM flow_states
                WHERE flow_uuid = ?
                ORDER BY id DESC
                LIMIT 1
            """, (flow_uuid,))
            row = cursor.fetchone()

            if row:
                return json.loads(row[0])
            return None
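A minimal round-trip sketch of the class above; the temporary path below is illustrative, not part of the change:

import tempfile
from pathlib import Path

from crewai.flow.persistence.sqlite import SQLiteFlowPersistence

# Hypothetical scratch location; any writable path works.
db_file = Path(tempfile.mkdtemp()) / "flow_states.db"
persistence = SQLiteFlowPersistence(db_path=str(db_file))

# Each save_state call appends a row; load_state returns the newest one.
persistence.save_state("flow-123", "step_one", {"counter": 1})
persistence.save_state("flow-123", "step_two", {"counter": 2})

assert persistence.load_state("flow-123") == {"counter": 2}
assert persistence.load_state("unknown-uuid") is None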
@@ -47,7 +47,7 @@ class FastEmbed(BaseEmbedder):
             cache_dir=str(cache_dir) if cache_dir else None,
         )
 
-    def embed_chunks(self, chunks: List[str]) -> List[np.ndarray]:
+    def embed_chunks(self, chunks: List[str]) -> np.ndarray:
         """
         Generate embeddings for a list of text chunks
 
@@ -55,12 +55,12 @@ class FastEmbed(BaseEmbedder):
             chunks: List of text chunks to embed
 
         Returns:
-            List of embeddings
+            Array of embeddings
         """
         embeddings = list(self.model.embed(chunks))
-        return embeddings
+        return np.stack(embeddings)
 
-    def embed_texts(self, texts: List[str]) -> List[np.ndarray]:
+    def embed_texts(self, texts: List[str]) -> np.ndarray:
         """
         Generate embeddings for a list of texts
 
@@ -68,10 +68,10 @@ class FastEmbed(BaseEmbedder):
             texts: List of texts to embed
 
         Returns:
-            List of embeddings
+            Array of embeddings
         """
         embeddings = list(self.model.embed(texts))
-        return embeddings
+        return np.stack(embeddings)
 
     def embed_text(self, text: str) -> np.ndarray:
         """
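The return-type change above swaps a Python list of vectors for a single stacked array. A minimal sketch of what np.stack does here, assuming equal-length 1-D embeddings as fastembed yields:

import numpy as np

# Stand-ins for the vectors produced by self.model.embed(chunks).
embeddings = [np.array([0.1, 0.2, 0.3]), np.array([0.4, 0.5, 0.6])]

stacked = np.stack(embeddings)  # one (n_chunks, dim) array instead of a list
assert stacked.shape == (2, 3)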
@@ -15,20 +15,20 @@ class Knowledge(BaseModel):
     Args:
         sources: List[BaseKnowledgeSource] = Field(default_factory=list)
         storage: Optional[KnowledgeStorage] = Field(default=None)
-        embedder_config: Optional[Dict[str, Any]] = None
+        embedder: Optional[Dict[str, Any]] = None
     """
 
     sources: List[BaseKnowledgeSource] = Field(default_factory=list)
     model_config = ConfigDict(arbitrary_types_allowed=True)
     storage: Optional[KnowledgeStorage] = Field(default=None)
-    embedder_config: Optional[Dict[str, Any]] = None
+    embedder: Optional[Dict[str, Any]] = None
     collection_name: Optional[str] = None
 
     def __init__(
         self,
         collection_name: str,
         sources: List[BaseKnowledgeSource],
-        embedder_config: Optional[Dict[str, Any]] = None,
+        embedder: Optional[Dict[str, Any]] = None,
         storage: Optional[KnowledgeStorage] = None,
         **data,
     ):
@@ -37,25 +37,23 @@ class Knowledge(BaseModel):
             self.storage = storage
         else:
             self.storage = KnowledgeStorage(
-                embedder_config=embedder_config, collection_name=collection_name
+                embedder=embedder, collection_name=collection_name
             )
         self.sources = sources
         self.storage.initialize_knowledge_storage()
-        for source in sources:
-            source.storage = self.storage
-            source.add()
+        self._add_sources()
 
     def query(self, query: List[str], limit: int = 3) -> List[Dict[str, Any]]:
         """
         Query across all knowledge sources to find the most relevant information.
         Returns the top_k most relevant chunks.
 
         Raises:
             ValueError: If storage is not initialized.
         """
         if self.storage is None:
             raise ValueError("Storage is not initialized.")
 
         results = self.storage.search(
             query,
             limit,
@@ -63,6 +61,15 @@ class Knowledge(BaseModel):
         return results
 
     def _add_sources(self):
-        for source in self.sources:
-            source.storage = self.storage
-            source.add()
+        try:
+            for source in self.sources:
+                source.storage = self.storage
+                source.add()
+        except Exception as e:
+            raise e
+
+    def reset(self) -> None:
+        if self.storage:
+            self.storage.reset()
+        else:
+            raise ValueError("Storage is not initialized.")
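A hedged usage sketch of the renamed parameter: Knowledge now accepts embedder instead of embedder_config. The import paths assume crewai's StringKnowledgeSource helper, and the embedder dict shape follows EmbeddingConfigurator's provider/config convention; both are assumptions, not part of this diff:

from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(content="CrewAI orchestrates collaborating AI agents.")
knowledge = Knowledge(
    collection_name="demo",
    sources=[source],
    # Formerly embedder_config.
    embedder={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
)

results = knowledge.query(["What does CrewAI do?"], limit=3)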
@@ -29,7 +29,13 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
     def validate_file_path(cls, v, info):
         """Validate that at least one of file_path or file_paths is provided."""
         # Single check if both are None, O(1) instead of nested conditions
-        if v is None and info.data.get("file_path" if info.field_name == "file_paths" else "file_paths") is None:
+        if (
+            v is None
+            and info.data.get(
+                "file_path" if info.field_name == "file_paths" else "file_paths"
+            )
+            is None
+        ):
             raise ValueError("Either file_path or file_paths must be provided")
         return v
 
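The reformatted validator above is a cross-field check: one field inspects the other through info.data. A standalone sketch of the same pattern (FileSource and its fields are hypothetical; validating only the later-declared field keeps the earlier one visible in info.data):

from typing import List, Optional

from pydantic import BaseModel, field_validator


class FileSource(BaseModel):
    file_path: Optional[str] = None
    file_paths: Optional[List[str]] = None

    @field_validator("file_paths")
    @classmethod
    def check_at_least_one(cls, v, info):
        # info.data holds already-validated fields, so file_path is visible
        # here because it is declared before file_paths.
        if v is None and info.data.get("file_path") is None:
            raise ValueError("Either file_path or file_paths must be provided")
        return v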
@@ -2,11 +2,17 @@ from pathlib import Path
 from typing import Iterator, List, Optional, Union
 from urllib.parse import urlparse
 
-from docling.datamodel.base_models import InputFormat
-from docling.document_converter import DocumentConverter
-from docling.exceptions import ConversionError
-from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
-from docling_core.types.doc.document import DoclingDocument
+try:
+    from docling.datamodel.base_models import InputFormat
+    from docling.document_converter import DocumentConverter
+    from docling.exceptions import ConversionError
+    from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
+    from docling_core.types.doc.document import DoclingDocument
+
+    DOCLING_AVAILABLE = True
+except ImportError:
+    DOCLING_AVAILABLE = False
 
 from pydantic import Field
 
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
@@ -19,14 +25,22 @@ class CrewDoclingSource(BaseKnowledgeSource):
     This will auto support PDF, DOCX, and TXT, XLSX, Images, and HTML files without any additional dependencies and follows the docling package as the source of truth.
     """
 
+    def __init__(self, *args, **kwargs):
+        if not DOCLING_AVAILABLE:
+            raise ImportError(
+                "The docling package is required to use CrewDoclingSource. "
+                "Please install it using: uv add docling"
+            )
+        super().__init__(*args, **kwargs)
+
     _logger: Logger = Logger(verbose=True)
 
     file_path: Optional[List[Union[Path, str]]] = Field(default=None)
     file_paths: List[Union[Path, str]] = Field(default_factory=list)
     chunks: List[str] = Field(default_factory=list)
     safe_file_paths: List[Union[Path, str]] = Field(default_factory=list)
-    content: List[DoclingDocument] = Field(default_factory=list)
-    document_converter: DocumentConverter = Field(
+    content: List["DoclingDocument"] = Field(default_factory=list)
+    document_converter: "DocumentConverter" = Field(
         default_factory=lambda: DocumentConverter(
             allowed_formats=[
                 InputFormat.MD,
@@ -52,7 +66,7 @@ class CrewDoclingSource(BaseKnowledgeSource):
         self.safe_file_paths = self.validate_content()
         self.content = self._load_content()
 
-    def _load_content(self) -> List[DoclingDocument]:
+    def _load_content(self) -> List["DoclingDocument"]:
         try:
             return self._convert_source_to_docling_documents()
         except ConversionError as e:
@@ -74,11 +88,11 @@ class CrewDoclingSource(BaseKnowledgeSource):
         self.chunks.extend(list(new_chunks_iterable))
         self._save_documents()
 
-    def _convert_source_to_docling_documents(self) -> List[DoclingDocument]:
+    def _convert_source_to_docling_documents(self) -> List["DoclingDocument"]:
         conv_results_iter = self.document_converter.convert_all(self.safe_file_paths)
         return [result.document for result in conv_results_iter]
 
-    def _chunk_doc(self, doc: DoclingDocument) -> Iterator[str]:
+    def _chunk_doc(self, doc: "DoclingDocument") -> Iterator[str]:
         chunker = HierarchicalChunker()
         for chunk in chunker.chunk(doc):
             yield chunk.text
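The try/except import plus a constructor-time check is a standard optional-dependency guard, and the switch to string annotations ("DoclingDocument") keeps the module importable when docling is absent. A generic sketch of the pattern (fancylib and FancyFeature are hypothetical stand-ins, not crewAI names):

try:
    import fancylib  # hypothetical optional dependency

    FANCYLIB_AVAILABLE = True
except ImportError:
    FANCYLIB_AVAILABLE = False


class FancyFeature:
    def __init__(self) -> None:
        # Fail at construction, with an actionable message, instead of
        # breaking the whole package at import time.
        if not FANCYLIB_AVAILABLE:
            raise ImportError(
                "The fancylib package is required to use FancyFeature. "
                "Please install it using: uv add fancylib"
            )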
@@ -48,11 +48,11 @@ class KnowledgeStorage(BaseKnowledgeStorage):
 
     def __init__(
         self,
-        embedder_config: Optional[Dict[str, Any]] = None,
+        embedder: Optional[Dict[str, Any]] = None,
         collection_name: Optional[str] = None,
     ):
         self.collection_name = collection_name
-        self._set_embedder_config(embedder_config)
+        self._set_embedder_config(embedder)
 
     def search(
         self,
@@ -99,7 +99,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
             )
             if self.app:
                 self.collection = self.app.get_or_create_collection(
-                    name=collection_name, embedding_function=self.embedder_config
+                    name=collection_name, embedding_function=self.embedder
                 )
             else:
                 raise Exception("Vector Database Client not initialized")
@@ -154,9 +154,15 @@ class KnowledgeStorage(BaseKnowledgeStorage):
             filtered_ids.append(doc_id)
 
         # If we have no metadata at all, set it to None
-        final_metadata: Optional[OneOrMany[chromadb.Metadata]] = (
-            None if all(m is None for m in filtered_metadata) else filtered_metadata
-        )
+        final_metadata: Optional[List[Dict[str, Union[str, int, float, bool]]]] = None
+        if not all(m is None for m in filtered_metadata):
+            final_metadata = []
+            for m in filtered_metadata:
+                if m is not None:
+                    filtered_m = {k: v for k, v in m.items() if isinstance(v, (str, int, float, bool))}
+                    final_metadata.append(filtered_m)
+                else:
+                    final_metadata.append({"empty": True})
 
         self.collection.upsert(
             documents=filtered_docs,
@@ -187,17 +193,15 @@ class KnowledgeStorage(BaseKnowledgeStorage):
             api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
         )
 
-    def _set_embedder_config(
-        self, embedder_config: Optional[Dict[str, Any]] = None
-    ) -> None:
+    def _set_embedder_config(self, embedder: Optional[Dict[str, Any]] = None) -> None:
         """Set the embedding configuration for the knowledge storage.
 
         Args:
            embedder_config (Optional[Dict[str, Any]]): Configuration dictionary for the embedder.
                If None or empty, defaults to the default embedding function.
         """
-        self.embedder_config = (
-            EmbeddingConfigurator().configure_embedder(embedder_config)
-            if embedder_config
+        self.embedder = (
+            EmbeddingConfigurator().configure_embedder(embedder)
+            if embedder
             else self._create_default_embedding_function()
         )
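The metadata loop above enforces ChromaDB's constraint that metadata values be str, int, float, or bool. A minimal sketch of the filtering step in isolation:

metadata = {"source": "doc.md", "page": 3, "tags": ["a", "b"], "score": 0.9}

filtered = {k: v for k, v in metadata.items() if isinstance(v, (str, int, float, bool))}
assert filtered == {"source": "doc.md", "page": 3, "score": 0.9}  # "tags" dropped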
@@ -1,20 +1,29 @@
+import json
 import logging
 import os
 import sys
 import threading
 import warnings
 from contextlib import contextmanager
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Literal, Optional, Type, Union, cast
+
+from dotenv import load_dotenv
+from pydantic import BaseModel
 
 with warnings.catch_warnings():
     warnings.simplefilter("ignore", UserWarning)
     import litellm
-    from litellm import get_supported_openai_params
+    from litellm import Choices, get_supported_openai_params
+    from litellm.types.utils import ModelResponse
+    from litellm.utils import supports_response_schema
 
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
     LLMContextLengthExceededException,
 )
 
+load_dotenv()
+
 
 class FilteredStream:
     def __init__(self, original_stream):
@@ -23,6 +32,7 @@ class FilteredStream:
 
     def write(self, s) -> int:
         with self._lock:
+            # Filter out extraneous messages from LiteLLM
             if (
                 "Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new"
                 in s
@@ -68,6 +78,18 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "mixtral-8x7b-32768": 32768,
     "llama-3.3-70b-versatile": 128000,
     "llama-3.3-70b-instruct": 128000,
+    # sambanova
+    "Meta-Llama-3.3-70B-Instruct": 131072,
+    "QwQ-32B-Preview": 8192,
+    "Qwen2.5-72B-Instruct": 8192,
+    "Qwen2.5-Coder-32B-Instruct": 8192,
+    "Meta-Llama-3.1-405B-Instruct": 8192,
+    "Meta-Llama-3.1-70B-Instruct": 131072,
+    "Meta-Llama-3.1-8B-Instruct": 131072,
+    "Llama-3.2-90B-Vision-Instruct": 16384,
+    "Llama-3.2-11B-Vision-Instruct": 16384,
+    "Meta-Llama-3.2-3B-Instruct": 4096,
+    "Meta-Llama-3.2-1B-Instruct": 16384,
 }
 
 DEFAULT_CONTEXT_WINDOW_SIZE = 8192
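Since CONTEXT_WINDOW_USAGE_RATIO is 0.75, the effective budget for any model in this table is a fraction of its nominal size. A quick sketch of the arithmetic:

LLM_CONTEXT_WINDOW_SIZES = {"Meta-Llama-3.1-70B-Instruct": 131072}  # excerpt
CONTEXT_WINDOW_USAGE_RATIO = 0.75

usable = int(LLM_CONTEXT_WINDOW_SIZES["Meta-Llama-3.1-70B-Instruct"] * CONTEXT_WINDOW_USAGE_RATIO)
assert usable == 98304  # tokens actually budgeted, leaving headroom for output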
@@ -78,17 +100,18 @@ CONTEXT_WINDOW_USAGE_RATIO = 0.75
 def suppress_warnings():
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
+        warnings.filterwarnings(
+            "ignore", message="open_text is deprecated*", category=DeprecationWarning
+        )
 
         # Redirect stdout and stderr
         old_stdout = sys.stdout
         old_stderr = sys.stderr
         sys.stdout = FilteredStream(old_stdout)
         sys.stderr = FilteredStream(old_stderr)
 
         try:
             yield
         finally:
-            # Restore stdout and stderr
             sys.stdout = old_stdout
             sys.stderr = old_stderr
@@ -107,14 +130,16 @@ class LLM:
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         logit_bias: Optional[Dict[int, float]] = None,
-        response_format: Optional[Dict[str, Any]] = None,
+        response_format: Optional[Type[BaseModel]] = None,
         seed: Optional[int] = None,
-        logprobs: Optional[bool] = None,
+        logprobs: Optional[int] = None,
         top_logprobs: Optional[int] = None,
         base_url: Optional[str] = None,
+        api_base: Optional[str] = None,
         api_version: Optional[str] = None,
         api_key: Optional[str] = None,
         callbacks: List[Any] = [],
+        reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
         **kwargs,
     ):
         self.model = model
@@ -122,7 +147,6 @@ class LLM:
         self.temperature = temperature
         self.top_p = top_p
         self.n = n
-        self.stop = stop
         self.max_completion_tokens = max_completion_tokens
         self.max_tokens = max_tokens
         self.presence_penalty = presence_penalty
@@ -133,26 +157,110 @@ class LLM:
         self.logprobs = logprobs
         self.top_logprobs = top_logprobs
         self.base_url = base_url
+        self.api_base = api_base
         self.api_version = api_version
         self.api_key = api_key
         self.callbacks = callbacks
         self.context_window_size = 0
-        self.kwargs = kwargs
+        self.reasoning_effort = reasoning_effort
+        self.additional_params = kwargs
+        self.is_anthropic = self._is_anthropic_model(model)
 
         litellm.drop_params = True
 
+        # Normalize self.stop to always be a List[str]
+        if stop is None:
+            self.stop: List[str] = []
+        elif isinstance(stop, str):
+            self.stop = [stop]
+        else:
+            self.stop = stop
+
         self.set_callbacks(callbacks)
         self.set_env_callbacks()
 
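The normalization above means downstream code can treat self.stop as List[str] unconditionally. A sketch of the three input shapes, written as a free function for illustration:

from typing import List, Optional, Union


def normalize_stop(stop: Optional[Union[str, List[str]]]) -> List[str]:
    # Mirrors the constructor logic: None -> [], str -> [str], list -> list.
    if stop is None:
        return []
    if isinstance(stop, str):
        return [stop]
    return stop


assert normalize_stop(None) == []
assert normalize_stop("END") == ["END"]
assert normalize_stop(["END", "STOP"]) == ["END", "STOP"]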
-    def call(self, messages: List[Dict[str, str]], callbacks: List[Any] = []) -> str:
+    def _is_anthropic_model(self, model: str) -> bool:
+        """Determine if the model is from Anthropic provider.
+
+        Args:
+            model: The model identifier string.
+
+        Returns:
+            bool: True if the model is from Anthropic, False otherwise.
+        """
+        ANTHROPIC_PREFIXES = ('anthropic/', 'claude-', 'claude/')
+        return any(prefix in model.lower() for prefix in ANTHROPIC_PREFIXES)
+
+    def call(
+        self,
+        messages: Union[str, List[Dict[str, str]]],
+        tools: Optional[List[dict]] = None,
+        callbacks: Optional[List[Any]] = None,
+        available_functions: Optional[Dict[str, Any]] = None,
+    ) -> Union[str, Any]:
+        """High-level LLM call method.
+
+        Args:
+            messages: Input messages for the LLM.
+                Can be a string or list of message dictionaries.
+                If string, it will be converted to a single user message.
+                If list, each dict must have 'role' and 'content' keys.
+            tools: Optional list of tool schemas for function calling.
+                Each tool should define its name, description, and parameters.
+            callbacks: Optional list of callback functions to be executed
+                during and after the LLM call.
+            available_functions: Optional dict mapping function names to callables
+                that can be invoked by the LLM.
+
+        Returns:
+            Union[str, Any]: Either a text response from the LLM (str) or
+                the result of a tool function call (Any).
+
+        Raises:
+            TypeError: If messages format is invalid
+            ValueError: If response format is not supported
+            LLMContextLengthExceededException: If input exceeds model's context limit
+
+        Examples:
+            # Example 1: Simple string input
+            >>> response = llm.call("Return the name of a random city.")
+            >>> print(response)
+            "Paris"
+
+            # Example 2: Message list with system and user messages
+            >>> messages = [
+            ...     {"role": "system", "content": "You are a geography expert"},
+            ...     {"role": "user", "content": "What is France's capital?"}
+            ... ]
+            >>> response = llm.call(messages)
+            >>> print(response)
+            "The capital of France is Paris."
+        """
+        # Validate parameters before proceeding with the call.
+        self._validate_call_params()
+
+        if isinstance(messages, str):
+            messages = [{"role": "user", "content": messages}]
+
+        # For O1 models, system messages are not supported.
+        # Convert any system messages into assistant messages.
+        if "o1" in self.model.lower():
+            for message in messages:
+                if message.get("role") == "system":
+                    message["role"] = "assistant"
+
         with suppress_warnings():
             if callbacks and len(callbacks) > 0:
                 self.set_callbacks(callbacks)
 
             try:
+                # --- 1) Format messages according to provider requirements
+                formatted_messages = self._format_messages_for_provider(messages)
+
+                # --- 2) Prepare the parameters for the completion call
                 params = {
                     "model": self.model,
-                    "messages": messages,
+                    "messages": formatted_messages,
                     "timeout": self.timeout,
                     "temperature": self.temperature,
                     "top_p": self.top_p,
@@ -166,25 +274,141 @@ class LLM:
                     "seed": self.seed,
                     "logprobs": self.logprobs,
                     "top_logprobs": self.top_logprobs,
-                    "api_base": self.base_url,
+                    "api_base": self.api_base,
+                    "base_url": self.base_url,
                     "api_version": self.api_version,
                     "api_key": self.api_key,
                     "stream": False,
-                    **self.kwargs,
+                    "tools": tools,
+                    "reasoning_effort": self.reasoning_effort,
+                    **self.additional_params,
                 }
 
-                # Remove None values to avoid passing unnecessary parameters
+                # Remove None values from params
                 params = {k: v for k, v in params.items() if v is not None}
 
+                # --- 2) Make the completion call
                 response = litellm.completion(**params)
-                return response["choices"][0]["message"]["content"]
+                response_message = cast(Choices, cast(ModelResponse, response).choices)[
+                    0
+                ].message
+                text_response = response_message.content or ""
+                tool_calls = getattr(response_message, "tool_calls", [])
+
+                # --- 3) Handle callbacks with usage info
+                if callbacks and len(callbacks) > 0:
+                    for callback in callbacks:
+                        if hasattr(callback, "log_success_event"):
+                            usage_info = getattr(response, "usage", None)
+                            if usage_info:
+                                callback.log_success_event(
+                                    kwargs=params,
+                                    response_obj={"usage": usage_info},
+                                    start_time=0,
+                                    end_time=0,
+                                )
+
+                # --- 4) If no tool calls, return the text response
+                if not tool_calls or not available_functions:
+                    return text_response
+
+                # --- 5) Handle the tool call
+                tool_call = tool_calls[0]
+                function_name = tool_call.function.name
+
+                if function_name in available_functions:
+                    try:
+                        function_args = json.loads(tool_call.function.arguments)
+                    except json.JSONDecodeError as e:
+                        logging.warning(f"Failed to parse function arguments: {e}")
+                        return text_response
+
+                    fn = available_functions[function_name]
+                    try:
+                        # Call the actual tool function
+                        result = fn(**function_args)
+                        return result
+
+                    except Exception as e:
+                        logging.error(
+                            f"Error executing function '{function_name}': {e}"
+                        )
+                        return text_response
+
+                else:
+                    logging.warning(
+                        f"Tool call requested unknown function '{function_name}'"
+                    )
+                    return text_response
+
             except Exception as e:
                 if not LLMContextLengthExceededException(
                     str(e)
                 )._is_context_limit_error(str(e)):
                     logging.error(f"LiteLLM call failed: {str(e)}")
-
-                raise  # Re-raise the exception after logging
+                raise
+
+    def _format_messages_for_provider(self, messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
+        """Format messages according to provider requirements.
+
+        Args:
+            messages: List of message dictionaries with 'role' and 'content' keys.
+                Can be empty or None.
+
+        Returns:
+            List of formatted messages according to provider requirements.
+            For Anthropic models, ensures first message has 'user' role.
+
+        Raises:
+            TypeError: If messages is None or contains invalid message format.
+        """
+        if messages is None:
+            raise TypeError("Messages cannot be None")
+
+        # Validate message format first
+        for msg in messages:
+            if not isinstance(msg, dict) or "role" not in msg or "content" not in msg:
+                raise TypeError("Invalid message format. Each message must be a dict with 'role' and 'content' keys")
+
+        if not self.is_anthropic:
+            return messages
+
+        # Anthropic requires messages to start with 'user' role
+        if not messages or messages[0]["role"] == "system":
+            # If first message is system or empty, add a placeholder user message
+            return [{"role": "user", "content": "."}, *messages]
+
+        return messages
+
+    def _get_custom_llm_provider(self) -> str:
+        """
+        Derives the custom_llm_provider from the model string.
+        - For example, if the model is "openrouter/deepseek/deepseek-chat", returns "openrouter".
+        - If the model is "gemini/gemini-1.5-pro", returns "gemini".
+        - If there is no '/', defaults to "openai".
+        """
+        if "/" in self.model:
+            return self.model.split("/")[0]
+        return "openai"
+
+    def _validate_call_params(self) -> None:
+        """
+        Validate parameters before making a call. Currently this only checks if
+        a response_format is provided and whether the model supports it.
+        The custom_llm_provider is dynamically determined from the model:
+        - E.g., "openrouter/deepseek/deepseek-chat" yields "openrouter"
+        - "gemini/gemini-1.5-pro" yields "gemini"
+        - If no slash is present, "openai" is assumed.
+        """
+        provider = self._get_custom_llm_provider()
+        if self.response_format is not None and not supports_response_schema(
+            model=self.model,
+            custom_llm_provider=provider,
+        ):
+            raise ValueError(
+                f"The model {self.model} does not support response_format for provider '{provider}'. "
+                "Please remove response_format or use a supported model."
+            )
 
     def supports_function_calling(self) -> bool:
         try:
@@ -203,7 +427,10 @@ class LLM:
             return False
 
     def get_context_window_size(self) -> int:
-        # Only using 75% of the context window size to avoid cutting the message in the middle
+        """
+        Returns the context window size, using 75% of the maximum to avoid
+        cutting off messages mid-thread.
+        """
         if self.context_window_size != 0:
             return self.context_window_size
 
@@ -216,16 +443,21 @@ class LLM:
         return self.context_window_size
 
     def set_callbacks(self, callbacks: List[Any]):
-        callback_types = [type(callback) for callback in callbacks]
-        for callback in litellm.success_callback[:]:
-            if type(callback) in callback_types:
-                litellm.success_callback.remove(callback)
+        """
+        Attempt to keep a single set of callbacks in litellm by removing old
+        duplicates and adding new ones.
+        """
+        with suppress_warnings():
+            callback_types = [type(callback) for callback in callbacks]
+            for callback in litellm.success_callback[:]:
+                if type(callback) in callback_types:
+                    litellm.success_callback.remove(callback)
 
-        for callback in litellm._async_success_callback[:]:
-            if type(callback) in callback_types:
-                litellm._async_success_callback.remove(callback)
+            for callback in litellm._async_success_callback[:]:
+                if type(callback) in callback_types:
+                    litellm._async_success_callback.remove(callback)
 
-        litellm.callbacks = callbacks
+            litellm.callbacks = callbacks
 
     def set_env_callbacks(self):
         """
@@ -246,19 +478,20 @@ class LLM:
         This will set `litellm.success_callback` to ["langfuse", "langsmith"] and
         `litellm.failure_callback` to ["langfuse"].
         """
-        success_callbacks_str = os.environ.get("LITELLM_SUCCESS_CALLBACKS", "")
-        success_callbacks = []
-        if success_callbacks_str:
-            success_callbacks = [
-                callback.strip() for callback in success_callbacks_str.split(",")
-            ]
+        with suppress_warnings():
+            success_callbacks_str = os.environ.get("LITELLM_SUCCESS_CALLBACKS", "")
+            success_callbacks = []
+            if success_callbacks_str:
+                success_callbacks = [
+                    cb.strip() for cb in success_callbacks_str.split(",") if cb.strip()
+                ]
 
-        failure_callbacks_str = os.environ.get("LITELLM_FAILURE_CALLBACKS", "")
-        failure_callbacks = []
-        if failure_callbacks_str:
-            failure_callbacks = [
-                callback.strip() for callback in failure_callbacks_str.split(",")
-            ]
+            failure_callbacks_str = os.environ.get("LITELLM_FAILURE_CALLBACKS", "")
+            failure_callbacks = []
+            if failure_callbacks_str:
+                failure_callbacks = [
+                    cb.strip() for cb in failure_callbacks_str.split(",") if cb.strip()
+                ]
 
-        litellm.success_callback = success_callbacks
-        litellm.failure_callback = failure_callbacks
+            litellm.success_callback = success_callbacks
+            litellm.failure_callback = failure_callbacks
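Putting the new call surface together, a hedged sketch of function calling through LLM.call; the tool schema follows the OpenAI-style convention that litellm accepts, and the model name is illustrative (a real API key is needed at runtime):

from crewai.llm import LLM


def get_weather(city: str) -> str:
    return f"It is sunny in {city}."


tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

llm = LLM(model="gpt-4o-mini")
# If the model emits a tool call, call() parses the JSON arguments and invokes
# the matching callable; otherwise it returns the plain text response.
result = llm.call(
    "What is the weather in Paris?",
    tools=tools,
    available_functions={"get_weather": get_weather},
)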
@@ -1,3 +1,7 @@
+from typing import Optional
+
+from pydantic import PrivateAttr
+
 from crewai.memory.entity.entity_memory_item import EntityMemoryItem
 from crewai.memory.memory import Memory
 from crewai.memory.storage.rag_storage import RAGStorage
@@ -10,13 +14,15 @@ class EntityMemory(Memory):
     Inherits from the Memory class.
     """
 
-    def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
-        if hasattr(crew, "memory_config") and crew.memory_config is not None:
-            self.memory_provider = crew.memory_config.get("provider")
-        else:
-            self.memory_provider = None
+    _memory_provider: Optional[str] = PrivateAttr()
 
-        if self.memory_provider == "mem0":
+    def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
+        if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
+            memory_provider = crew.memory_config.get("provider")
+        else:
+            memory_provider = None
+
+        if memory_provider == "mem0":
             try:
                 from crewai.memory.storage.mem0_storage import Mem0Storage
             except ImportError:
@@ -36,11 +42,13 @@ class EntityMemory(Memory):
                     path=path,
                 )
             )
-        super().__init__(storage)
+
+        super().__init__(storage=storage)
+        self._memory_provider = memory_provider
 
     def save(self, item: EntityMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
         """Saves an entity item into the SQLite storage."""
-        if self.memory_provider == "mem0":
+        if self._memory_provider == "mem0":
             data = f"""
             Remember details about the following entity:
             Name: {item.name}
@@ -17,7 +17,7 @@ class LongTermMemory(Memory):
     def __init__(self, storage=None, path=None):
         if not storage:
             storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
-        super().__init__(storage)
+        super().__init__(storage=storage)
 
     def save(self, item: LongTermMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
         metadata = item.metadata
@@ -1,15 +1,19 @@
 from typing import Any, Dict, List, Optional
 
-from crewai.memory.storage.rag_storage import RAGStorage
+from pydantic import BaseModel
 
 
-class Memory:
+class Memory(BaseModel):
     """
     Base class for memory, now supporting agent tags and generic metadata.
     """
 
-    def __init__(self, storage: RAGStorage):
-        self.storage = storage
+    embedder_config: Optional[Dict[str, Any]] = None
+
+    storage: Any
+
+    def __init__(self, storage: Any, **data: Any):
+        super().__init__(storage=storage, **data)
 
     def save(
         self,
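Because Memory is now a pydantic BaseModel, plain attribute assignment in __init__ no longer works for undeclared fields; that is why the subclasses above switch to PrivateAttr and route storage through super().__init__. A minimal sketch of the constraint (Example is hypothetical):

from pydantic import BaseModel, PrivateAttr


class Example(BaseModel):
    storage: object
    _provider: str = PrivateAttr(default="")

    def __init__(self, storage: object, **data):
        super().__init__(storage=storage, **data)
        self._provider = "mem0"  # declared private attrs may be set after init


ex = Example(storage=object())
assert ex._provider == "mem0"
# ex.undeclared = 1 would raise, since pydantic models reject unknown fields.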
@@ -1,5 +1,7 @@
 from typing import Any, Dict, Optional
 
+from pydantic import PrivateAttr
+
 from crewai.memory.memory import Memory
 from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
 from crewai.memory.storage.rag_storage import RAGStorage
@@ -14,13 +16,15 @@ class ShortTermMemory(Memory):
     MemoryItem instances.
     """
 
-    def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
-        if hasattr(crew, "memory_config") and crew.memory_config is not None:
-            self.memory_provider = crew.memory_config.get("provider")
-        else:
-            self.memory_provider = None
+    _memory_provider: Optional[str] = PrivateAttr()
 
-        if self.memory_provider == "mem0":
+    def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
+        if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
+            memory_provider = crew.memory_config.get("provider")
+        else:
+            memory_provider = None
+
+        if memory_provider == "mem0":
             try:
                 from crewai.memory.storage.mem0_storage import Mem0Storage
             except ImportError:
@@ -39,7 +43,8 @@ class ShortTermMemory(Memory):
                     path=path,
                 )
             )
-        super().__init__(storage)
+        super().__init__(storage=storage)
+        self._memory_provider = memory_provider
 
     def save(
         self,
@@ -48,7 +53,7 @@ class ShortTermMemory(Memory):
         agent: Optional[str] = None,
     ) -> None:
         item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
-        if self.memory_provider == "mem0":
+        if self._memory_provider == "mem0":
             item.data = f"Remember the following insights from Agent run: {item.data}"
 
         super().save(value=item.data, metadata=item.metadata, agent=item.agent)
@@ -13,7 +13,7 @@ class BaseRAGStorage(ABC):
         self,
         type: str,
         allow_reset: bool = True,
-        embedder_config: Optional[Any] = None,
+        embedder_config: Optional[Dict[str, Any]] = None,
         crew: Any = None,
     ):
         self.type = type
@@ -1,12 +1,17 @@
 import json
+import logging
 import sqlite3
+from pathlib import Path
 from typing import Any, Dict, List, Optional
 
 from crewai.task import Task
 from crewai.utilities import Printer
 from crewai.utilities.crew_json_encoder import CrewJSONEncoder
+from crewai.utilities.errors import DatabaseError, DatabaseOperationError
 from crewai.utilities.paths import db_storage_path
 
+logger = logging.getLogger(__name__)
+
 
 class KickoffTaskOutputsSQLiteStorage:
     """
@@ -14,15 +19,24 @@ class KickoffTaskOutputsSQLiteStorage:
     """
 
     def __init__(
-        self, db_path: str = f"{db_storage_path()}/latest_kickoff_task_outputs.db"
+        self, db_path: Optional[str] = None
     ) -> None:
+        if db_path is None:
+            # Get the parent directory of the default db path and create our db file there
+            db_path = str(Path(db_storage_path()) / "latest_kickoff_task_outputs.db")
         self.db_path = db_path
         self._printer: Printer = Printer()
         self._initialize_db()
 
-    def _initialize_db(self):
-        """
-        Initializes the SQLite database and creates LTM table
+    def _initialize_db(self) -> None:
+        """Initialize the SQLite database and create the latest_kickoff_task_outputs table.
+
+        This method sets up the database schema for storing task outputs. It creates
+        a table with columns for task_id, expected_output, output (as JSON),
+        task_index, inputs (as JSON), was_replayed flag, and timestamp.
+
+        Raises:
+            DatabaseOperationError: If database initialization fails due to SQLite errors.
         """
         try:
             with sqlite3.connect(self.db_path) as conn:
@@ -43,10 +57,9 @@ class KickoffTaskOutputsSQLiteStorage:
 
                 conn.commit()
         except sqlite3.Error as e:
-            self._printer.print(
-                content=f"SAVING KICKOFF TASK OUTPUTS ERROR: An error occurred during database initialization: {e}",
-                color="red",
-            )
+            error_msg = DatabaseError.format_error(DatabaseError.INIT_ERROR, e)
+            logger.error(error_msg)
+            raise DatabaseOperationError(error_msg, e)
 
     def add(
         self,
@@ -55,9 +68,22 @@ class KickoffTaskOutputsSQLiteStorage:
         task_index: int,
         was_replayed: bool = False,
         inputs: Dict[str, Any] = {},
-    ):
+    ) -> None:
+        """Add a new task output record to the database.
+
+        Args:
+            task: The Task object containing task details.
+            output: Dictionary containing the task's output data.
+            task_index: Integer index of the task in the sequence.
+            was_replayed: Boolean indicating if this was a replay execution.
+            inputs: Dictionary of input parameters used for the task.
+
+        Raises:
+            DatabaseOperationError: If saving the task output fails due to SQLite errors.
+        """
         try:
             with sqlite3.connect(self.db_path) as conn:
+                conn.execute("BEGIN TRANSACTION")
                 cursor = conn.cursor()
                 cursor.execute(
                     """
@@ -76,21 +102,31 @@ class KickoffTaskOutputsSQLiteStorage:
                 )
                 conn.commit()
         except sqlite3.Error as e:
-            self._printer.print(
-                content=f"SAVING KICKOFF TASK OUTPUTS ERROR: An error occurred during database initialization: {e}",
-                color="red",
-            )
+            error_msg = DatabaseError.format_error(DatabaseError.SAVE_ERROR, e)
+            logger.error(error_msg)
+            raise DatabaseOperationError(error_msg, e)
 
     def update(
         self,
         task_index: int,
-        **kwargs,
-    ):
-        """
-        Updates an existing row in the latest_kickoff_task_outputs table based on task_index.
+        **kwargs: Any,
+    ) -> None:
+        """Update an existing task output record in the database.
+
+        Updates fields of a task output record identified by task_index. The fields
+        to update are provided as keyword arguments.
+
+        Args:
+            task_index: Integer index of the task to update.
+            **kwargs: Arbitrary keyword arguments representing fields to update.
+                Values that are dictionaries will be JSON encoded.
+
+        Raises:
+            DatabaseOperationError: If updating the task output fails due to SQLite errors.
         """
         try:
             with sqlite3.connect(self.db_path) as conn:
+                conn.execute("BEGIN TRANSACTION")
                 cursor = conn.cursor()
 
                 fields = []
@@ -110,14 +146,23 @@ class KickoffTaskOutputsSQLiteStorage:
                 conn.commit()
 
                 if cursor.rowcount == 0:
-                    self._printer.print(
-                        f"No row found with task_index {task_index}. No update performed.",
-                        color="red",
-                    )
+                    logger.warning(f"No row found with task_index {task_index}. No update performed.")
         except sqlite3.Error as e:
-            self._printer.print(f"UPDATE KICKOFF TASK OUTPUTS ERROR: {e}", color="red")
+            error_msg = DatabaseError.format_error(DatabaseError.UPDATE_ERROR, e)
+            logger.error(error_msg)
+            raise DatabaseOperationError(error_msg, e)
 
-    def load(self) -> Optional[List[Dict[str, Any]]]:
+    def load(self) -> List[Dict[str, Any]]:
+        """Load all task output records from the database.
+
+        Returns:
+            List of dictionaries containing task output records, ordered by task_index.
+            Each dictionary contains: task_id, expected_output, output, task_index,
+            inputs, was_replayed, and timestamp.
+
+        Raises:
+            DatabaseOperationError: If loading task outputs fails due to SQLite errors.
+        """
         try:
             with sqlite3.connect(self.db_path) as conn:
                 cursor = conn.cursor()
@@ -144,23 +189,26 @@ class KickoffTaskOutputsSQLiteStorage:
                 return results
 
         except sqlite3.Error as e:
-            self._printer.print(
-                content=f"LOADING KICKOFF TASK OUTPUTS ERROR: An error occurred while querying kickoff task outputs: {e}",
-                color="red",
-            )
-            return None
+            error_msg = DatabaseError.format_error(DatabaseError.LOAD_ERROR, e)
+            logger.error(error_msg)
+            raise DatabaseOperationError(error_msg, e)
 
-    def delete_all(self):
-        """
-        Deletes all rows from the latest_kickoff_task_outputs table.
+    def delete_all(self) -> None:
+        """Delete all task output records from the database.
+
+        This method removes all records from the latest_kickoff_task_outputs table.
+        Use with caution as this operation cannot be undone.
+
+        Raises:
+            DatabaseOperationError: If deleting task outputs fails due to SQLite errors.
        """
         try:
             with sqlite3.connect(self.db_path) as conn:
+                conn.execute("BEGIN TRANSACTION")
                 cursor = conn.cursor()
                 cursor.execute("DELETE FROM latest_kickoff_task_outputs")
                 conn.commit()
         except sqlite3.Error as e:
-            self._printer.print(
-                content=f"ERROR: Failed to delete all kickoff task outputs: {e}",
-                color="red",
-            )
+            error_msg = DatabaseError.format_error(DatabaseError.DELETE_ERROR, e)
+            logger.error(error_msg)
+            raise DatabaseOperationError(error_msg, e)
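The storage class now fails loudly through a shared error type instead of printing and continuing. A generic sketch of that pattern, assuming DatabaseError holds message templates and DatabaseOperationError wraps the original exception (the exact crewAI API may differ):

import logging
import sqlite3

logger = logging.getLogger(__name__)


class DatabaseOperationError(Exception):
    """Carries a formatted message plus the original sqlite3 error."""

    def __init__(self, message: str, original_error: Exception) -> None:
        super().__init__(message)
        self.original_error = original_error


def run_query(db_path: str, sql: str) -> None:
    try:
        with sqlite3.connect(db_path) as conn:
            conn.execute(sql)
    except sqlite3.Error as e:
        # Log once, then raise so callers can decide how to recover.
        msg = f"Database query failed: {e}"
        logger.error(msg)
        raise DatabaseOperationError(msg, e)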
@@ -1,5 +1,6 @@
 import json
 import sqlite3
+from pathlib import Path
 from typing import Any, Dict, List, Optional, Union
 
 from crewai.utilities import Printer
@@ -12,10 +13,15 @@ class LTMSQLiteStorage:
     """
 
     def __init__(
-        self, db_path: str = f"{db_storage_path()}/long_term_memory_storage.db"
+        self, db_path: Optional[str] = None
     ) -> None:
+        if db_path is None:
+            # Get the parent directory of the default db path and create our db file there
+            db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
         self.db_path = db_path
         self._printer: Printer = Printer()
+        # Ensure parent directory exists
+        Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
         self._initialize_db()
 
     def _initialize_db(self):
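The default-argument change above is the classic fix for defaults evaluated at import time: an f-string default calls db_storage_path() once, when the module loads, while the None sentinel defers the call to construction. A minimal illustration (storage_root and its env var are hypothetical stand-ins):

import os
from pathlib import Path


def storage_root() -> str:
    # Stand-in for db_storage_path(); imagine it reads configuration that a
    # user may change after import.
    return os.environ.get("CREWAI_STORAGE_DIR", "/tmp/crewai")


def make_db_path(db_path=None) -> str:
    # Evaluated per call, so late configuration changes are respected.
    if db_path is None:
        db_path = str(Path(storage_root()) / "long_term_memory_storage.db")
    return db_path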
@@ -27,10 +27,18 @@ class Mem0Storage(Storage):
             raise ValueError("User ID is required for user memory type")
 
         # API key in memory config overrides the environment variable
-        mem0_api_key = self.memory_config.get("config", {}).get("api_key") or os.getenv(
-            "MEM0_API_KEY"
-        )
-        self.memory = MemoryClient(api_key=mem0_api_key)
+        config = self.memory_config.get("config", {})
+        mem0_api_key = config.get("api_key") or os.getenv("MEM0_API_KEY")
+        mem0_org_id = config.get("org_id")
+        mem0_project_id = config.get("project_id")
+
+        # Initialize MemoryClient with available parameters
+        if mem0_org_id and mem0_project_id:
+            self.memory = MemoryClient(
+                api_key=mem0_api_key, org_id=mem0_org_id, project_id=mem0_project_id
+            )
+        else:
+            self.memory = MemoryClient(api_key=mem0_api_key)
 
     def _sanitize_role(self, role: str) -> str:
         """
@@ -57,7 +65,7 @@ class Mem0Storage(Storage):
                 metadata={"type": "long_term", **metadata},
             )
         elif self.memory_type == "entities":
-            entity_name = None
+            entity_name = self._get_agent_name()
             self.memory.add(
                 value, user_id=entity_name, metadata={"type": "entity", **metadata}
             )
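Review note: Mem0Storage now reads optional org_id and project_id keys from the memory config and only forwards them to MemoryClient when both are present. An illustrative sketch of the config shape this code path consumes (all values are placeholders):

    memory_config = {
        "provider": "mem0",
        "config": {
            "user_id": "user-123",       # required for user memory
            "api_key": "m0-...",         # falls back to the MEM0_API_KEY env var
            "org_id": "org-abc",         # optional
            "project_id": "proj-xyz",    # optional; org/project are only used together
        },
    }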
@@ -6,12 +6,17 @@ import shutil
 import uuid
 from typing import Any, Dict, List, Optional
 
-from chromadb.api import ClientAPI
+from chromadb.api import ClientAPI, Collection
+from chromadb.api.types import Documents, Embeddings, Metadatas
+
 from crewai.memory.storage.base_rag_storage import BaseRAGStorage
 from crewai.utilities import EmbeddingConfigurator
 from crewai.utilities.constants import MAX_FILE_NAME_LENGTH
 from crewai.utilities.paths import db_storage_path
+from crewai.utilities.exceptions.embedding_exceptions import (
+    EmbeddingConfigurationError,
+    EmbeddingInitializationError
+)
 
 
 @contextlib.contextmanager
@@ -32,15 +37,24 @@ def suppress_logging(
 
 
 class RAGStorage(BaseRAGStorage):
-    """
-    Extends Storage to handle embeddings for memory entries, improving
-    search efficiency.
+    """RAG-based Storage implementation using ChromaDB for vector storage and retrieval.
+
+    This class extends BaseRAGStorage to handle embeddings for memory entries,
+    improving search efficiency through vector similarity.
+
+    Attributes:
+        app: ChromaDB client instance
+        collection: ChromaDB collection for storing embeddings
+        type: Type of memory storage
+        allow_reset: Whether memory reset is allowed
+        path: Custom storage path for the database
     """
 
     app: ClientAPI | None = None
+    collection: Any = None
 
     def __init__(
-        self, type, allow_reset=True, embedder_config=None, crew=None, path=None
+        self, type: str, allow_reset: bool = True, embedder_config: Dict[str, Any] | None = None, crew: Any = None, path: str | None = None
     ):
         super().__init__(type, allow_reset, embedder_config, crew)
         agents = crew.agents if crew else []
@@ -50,7 +64,6 @@ class RAGStorage(BaseRAGStorage):
         self.storage_file_name = self._build_storage_file_name(type, agents)
 
         self.type = type
-
         self.allow_reset = allow_reset
         self.path = path
         self._initialize_app()
@@ -59,26 +72,36 @@ class RAGStorage(BaseRAGStorage):
         configurator = EmbeddingConfigurator()
         self.embedder_config = configurator.configure_embedder(self.embedder_config)
 
-    def _initialize_app(self):
+    def _initialize_app(self) -> None:
+        """Initialize the ChromaDB client and collection.
+
+        Raises:
+            RuntimeError: If ChromaDB client initialization fails
+            EmbeddingConfigurationError: If embedding configuration is invalid
+            EmbeddingInitializationError: If embedding function fails to initialize
+        """
         import chromadb
        from chromadb.config import Settings
 
         self._set_embedder_config()
-        chroma_client = chromadb.PersistentClient(
-            path=self.path if self.path else self.storage_file_name,
-            settings=Settings(allow_reset=self.allow_reset),
-        )
-
-        self.app = chroma_client
-
         try:
-            self.collection = self.app.get_collection(
-                name=self.type, embedding_function=self.embedder_config
-            )
-        except Exception:
-            self.collection = self.app.create_collection(
-                name=self.type, embedding_function=self.embedder_config
+            self.app = chromadb.PersistentClient(
+                path=self.path if self.path else self.storage_file_name,
+                settings=Settings(allow_reset=self.allow_reset),
             )
+            if not self.app:
+                raise RuntimeError("Failed to initialize ChromaDB client")
+
+            try:
+                self.collection = self.app.get_collection(
+                    name=self.type, embedding_function=self.embedder_config
+                )
+            except Exception:
+                self.collection = self.app.create_collection(
+                    name=self.type, embedding_function=self.embedder_config
+                )
+        except Exception as e:
+            raise RuntimeError(f"Failed to initialize ChromaDB: {str(e)}")
 
     def _sanitize_role(self, role: str) -> str:
         """
@@ -101,12 +124,21 @@ class RAGStorage(BaseRAGStorage):
         return f"{base_path}/{file_name}"
 
     def save(self, value: Any, metadata: Dict[str, Any]) -> None:
+        """Save a value with metadata to the memory storage.
+
+        Args:
+            value: The text content to store
+            metadata: Additional metadata for the stored content
+
+        Raises:
+            EmbeddingInitializationError: If embedding generation fails
+        """
         if not hasattr(self, "app") or not hasattr(self, "collection"):
             self._initialize_app()
         try:
             self._generate_embedding(value, metadata)
         except Exception as e:
-            logging.error(f"Error during {self.type} save: {str(e)}")
+            raise EmbeddingInitializationError(self.type, str(e))
 
     def search(
         self,
@@ -114,7 +146,18 @@ class RAGStorage(BaseRAGStorage):
         limit: int = 3,
         filter: Optional[dict] = None,
         score_threshold: float = 0.35,
-    ) -> List[Any]:
+    ) -> List[Dict[str, Any]]:
+        """Search for similar content in memory.
+
+        Args:
+            query: The search query text
+            limit: Maximum number of results to return
+            filter: Optional filter criteria
+            score_threshold: Minimum similarity score threshold
+
+        Returns:
+            List of matching results with metadata and scores
+        """
         if not hasattr(self, "app"):
             self._initialize_app()
 
@@ -138,37 +181,50 @@ class RAGStorage(BaseRAGStorage):
             logging.error(f"Error during {self.type} search: {str(e)}")
             return []
 
-    def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> None:  # type: ignore
+    def _generate_embedding(self, text: str, metadata: Optional[Dict[str, Any]] = None) -> Any:
+        """Generate and store embeddings for the given text.
+
+        Args:
+            text: The text to generate embeddings for
+            metadata: Optional additional metadata to store with the embeddings
+
+        Returns:
+            Any: The generated embedding or None if only storing
+        """
         if not hasattr(self, "app") or not hasattr(self, "collection"):
             self._initialize_app()
 
-        self.collection.add(
-            documents=[text],
-            metadatas=[metadata or {}],
-            ids=[str(uuid.uuid4())],
-        )
+        try:
+            self.collection.add(
+                documents=[text],
+                metadatas=[metadata or {}],
+                ids=[str(uuid.uuid4())],
+            )
+            return None
+        except Exception as e:
+            raise EmbeddingInitializationError(self.type, f"Failed to generate embedding: {str(e)}")
 
     def reset(self) -> None:
+        """Reset the memory storage by clearing the database and removing files.
+
+        Raises:
+            RuntimeError: If memory reset fails and allow_reset is False
+            EmbeddingConfigurationError: If embedding configuration is invalid during reinitialization
+        """
         try:
             if self.app:
                 self.app.reset()
-                shutil.rmtree(f"{db_storage_path()}/{self.type}")
+                storage_path = self.path if self.path else db_storage_path()
+                db_dir = os.path.join(storage_path, self.type)
+                if os.path.exists(db_dir):
+                    shutil.rmtree(db_dir)
                 self.app = None
                 self.collection = None
         except Exception as e:
             if "attempt to write a readonly database" in str(e):
-                # Ignore this specific error
+                # Ignore this specific error as it's expected in some environments
                 pass
             else:
-                raise Exception(
-                    f"An error occurred while resetting the {self.type} memory: {e}"
-                )
+                if not self.allow_reset:
+                    raise RuntimeError(f"Failed to reset {self.type} memory: {str(e)}")
+                logging.error(f"Error during {self.type} memory reset: {str(e)}")
 
-    def _create_default_embedding_function(self):
-        from chromadb.utils.embedding_functions.openai_embedding_function import (
-            OpenAIEmbeddingFunction,
-        )
-
-        return OpenAIEmbeddingFunction(
-            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
-        )
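Review note: client setup, saving, and reset are now wrapped in explicit error handling that surfaces RuntimeError/EmbeddingInitializationError instead of logging and silently continuing. A minimal usage sketch of the public surface, assuming an embedder is configured via environment variables and using an illustrative path:

    storage = RAGStorage(type="short_term", allow_reset=True, path="/tmp/crewai-rag")
    storage.save("Paris is the capital of France.", metadata={"source": "demo"})
    results = storage.search(query="capital of France", limit=3, score_threshold=0.35)
    storage.reset()  # drops the ChromaDB collection and removes the on-disk directory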
@@ -4,18 +4,23 @@ from typing import Callable
 from crewai import Crew
 from crewai.project.utils import memoize
 
+"""Decorators for defining crew components and their behaviors."""
+
 
 def before_kickoff(func):
+    """Marks a method to execute before crew kickoff."""
     func.is_before_kickoff = True
     return func
 
 
 def after_kickoff(func):
+    """Marks a method to execute after crew kickoff."""
     func.is_after_kickoff = True
     return func
 
 
 def task(func):
+    """Marks a method as a crew task."""
     func.is_task = True
 
     @wraps(func)
@@ -29,43 +34,51 @@ def task(func):
 
 
 def agent(func):
+    """Marks a method as a crew agent."""
     func.is_agent = True
     func = memoize(func)
     return func
 
 
 def llm(func):
+    """Marks a method as an LLM provider."""
     func.is_llm = True
     func = memoize(func)
     return func
 
 
 def output_json(cls):
+    """Marks a class as JSON output format."""
     cls.is_output_json = True
     return cls
 
 
 def output_pydantic(cls):
+    """Marks a class as Pydantic output format."""
     cls.is_output_pydantic = True
     return cls
 
 
 def tool(func):
+    """Marks a method as a crew tool."""
     func.is_tool = True
     return memoize(func)
 
 
 def callback(func):
+    """Marks a method as a crew callback."""
     func.is_callback = True
     return memoize(func)
 
 
 def cache_handler(func):
+    """Marks a method as a cache handler."""
     func.is_cache_handler = True
     return memoize(func)
 
 
 def crew(func) -> Callable[..., Crew]:
+    """Marks a method as the main crew execution point."""
+
     @wraps(func)
     def wrapper(self, *args, **kwargs) -> Crew:
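Review note: each decorator above only tags the wrapped callable with a marker attribute (is_task, is_agent, ...); CrewBase later collects the marked members by attribute. A minimal sketch of how they compose (the class name and agent/task bodies are placeholders):

    from crewai import Agent, Crew, Task
    from crewai.project import CrewBase, agent, crew, task

    @CrewBase
    class DemoCrew:
        @agent
        def researcher(self) -> Agent:
            return Agent(config=self.agents_config["researcher"])

        @task
        def research_task(self) -> Task:
            return Task(config=self.tasks_config["research_task"])

        @crew
        def demo_crew(self) -> Crew:
            return Crew(agents=self.agents, tasks=self.tasks)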
@@ -1,4 +1,5 @@
 import inspect
+import logging
 from pathlib import Path
 from typing import Any, Callable, Dict, TypeVar, cast
@@ -7,10 +8,16 @@ from dotenv import load_dotenv
 
 load_dotenv()
 
+logging.basicConfig(level=logging.WARNING)
+
 T = TypeVar("T", bound=type)
 
+"""Base decorator for creating crew classes with configuration and function management."""
+
 
 def CrewBase(cls: T) -> T:
+    """Wraps a class with crew functionality and configuration management."""
+
     class WrappedClass(cls):  # type: ignore
         is_crew_class: bool = True  # type: ignore
 
@@ -24,16 +31,9 @@ def CrewBase(cls: T) -> T:
 
         def __init__(self, *args, **kwargs):
             super().__init__(*args, **kwargs)
-            agents_config_path = self.base_directory / self.original_agents_config_path
-            tasks_config_path = self.base_directory / self.original_tasks_config_path
-
-            self.agents_config = self.load_yaml(agents_config_path)
-            self.tasks_config = self.load_yaml(tasks_config_path)
-
+            self.load_configurations()
             self.map_all_agent_variables()
             self.map_all_task_variables()
 
             # Preserve all decorated functions
             self._original_functions = {
                 name: method
@@ -49,7 +49,6 @@ def CrewBase(cls: T) -> T:
                 ]
                 )
             }
-
             # Store specific function types
             self._original_tasks = self._filter_functions(
                 self._original_functions, "is_task"
@@ -67,6 +66,44 @@ def CrewBase(cls: T) -> T:
                 self._original_functions, "is_kickoff"
             )
 
+        def load_configurations(self):
+            """Load agent and task configurations from YAML files."""
+            if isinstance(self.original_agents_config_path, str):
+                agents_config_path = (
+                    self.base_directory / self.original_agents_config_path
+                )
+                try:
+                    self.agents_config = self.load_yaml(agents_config_path)
+                except FileNotFoundError:
+                    logging.warning(
+                        f"Agent config file not found at {agents_config_path}. "
+                        "Proceeding with empty agent configurations."
+                    )
+                    self.agents_config = {}
+            else:
+                logging.warning(
+                    "No agent configuration path provided. Proceeding with empty agent configurations."
+                )
+                self.agents_config = {}
+
+            if isinstance(self.original_tasks_config_path, str):
+                tasks_config_path = (
+                    self.base_directory / self.original_tasks_config_path
+                )
+                try:
+                    self.tasks_config = self.load_yaml(tasks_config_path)
+                except FileNotFoundError:
+                    logging.warning(
+                        f"Task config file not found at {tasks_config_path}. "
+                        "Proceeding with empty task configurations."
+                    )
+                    self.tasks_config = {}
+            else:
+                logging.warning(
+                    "No task configuration path provided. Proceeding with empty task configurations."
+                )
+                self.tasks_config = {}
+
         @staticmethod
         def load_yaml(config_path: Path):
             try:
@@ -216,5 +253,5 @@ def CrewBase(cls: T) -> T:
     # Include base class (qual)name in the wrapper class (qual)name.
     WrappedClass.__name__ = CrewBase.__name__ + "(" + cls.__name__ + ")"
     WrappedClass.__qualname__ = CrewBase.__qualname__ + "(" + cls.__name__ + ")"
 
     return cast(T, WrappedClass)
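Review note: with load_configurations, a missing YAML file now degrades to an empty dict plus a warning instead of raising inside __init__. Illustratively, reusing the DemoCrew sketch above and assuming config/tasks.yaml does not exist:

    crew_instance = DemoCrew()           # logs a warning about the missing tasks config
    print(crew_instance.tasks_config)    # {} — tasks can still be defined entirely in code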
@@ -9,11 +9,13 @@ from copy import copy
 from hashlib import md5
 from pathlib import Path
 from typing import (
+    AbstractSet,
     Any,
     Callable,
     ClassVar,
     Dict,
     List,
+    Mapping,
     Optional,
     Set,
     Tuple,
@@ -41,6 +43,7 @@ from crewai.tools.base_tool import BaseTool
 from crewai.utilities.config import process_config
 from crewai.utilities.converter import Converter, convert_to_model
 from crewai.utilities.i18n import I18N
+from crewai.utilities.printer import Printer
 
 
 class Task(BaseModel):
@@ -108,7 +111,7 @@ class Task(BaseModel):
         description="Task output, it's final result after being executed", default=None
     )
     tools: Optional[List[BaseTool]] = Field(
-        default_factory=list,
+        default_factory=lambda: [],
         description="Tools the agent is limited to use for this task.",
     )
     id: UUID4 = Field(
@@ -124,41 +127,43 @@ class Task(BaseModel):
         description="A converter class used to export structured output",
         default=None,
     )
-    processed_by_agents: Set[str] = Field(default_factory=set)
+    processed_by_agents: Set[str] = Field(default_factory=lambda: set())
     guardrail: Optional[Callable[[TaskOutput], Tuple[bool, Any]]] = Field(
         default=None,
-        description="Function to validate task output before proceeding to next task"
+        description="Function to validate task output before proceeding to next task",
     )
     max_retries: int = Field(
-        default=3,
-        description="Maximum number of retries when guardrail fails"
+        default=3, description="Maximum number of retries when guardrail fails"
     )
-    retry_count: int = Field(
-        default=0,
-        description="Current number of retries"
+    retry_count: int = Field(default=0, description="Current number of retries")
+    start_time: Optional[datetime.datetime] = Field(
+        default=None, description="Start time of the task execution"
+    )
+    end_time: Optional[datetime.datetime] = Field(
+        default=None, description="End time of the task execution"
     )
 
     @field_validator("guardrail")
     @classmethod
     def validate_guardrail_function(cls, v: Optional[Callable]) -> Optional[Callable]:
         """Validate that the guardrail function has the correct signature and behavior.
 
         While type hints provide static checking, this validator ensures runtime safety by:
         1. Verifying the function accepts exactly one parameter (the TaskOutput)
         2. Checking return type annotations match Tuple[bool, Any] if present
         3. Providing clear, immediate error messages for debugging
 
         This runtime validation is crucial because:
         - Type hints are optional and can be ignored at runtime
         - Function signatures need immediate validation before task execution
         - Clear error messages help users debug guardrail implementation issues
 
         Args:
             v: The guardrail function to validate
 
         Returns:
             The validated guardrail function
 
         Raises:
             ValueError: If the function signature is invalid or return annotation
                 doesn't match Tuple[bool, Any]
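Review note: the guardrail field accepts any single-argument callable, and the validator hunk that follows additionally requires that an explicit return annotation, if present, be Tuple[bool, Any]. A minimal guardrail that passes both checks (the length threshold is illustrative):

    from typing import Any, Tuple
    from crewai.tasks.task_output import TaskOutput

    def min_length_guardrail(output: TaskOutput) -> Tuple[bool, Any]:
        # (False, <error>) triggers a retry with the error fed back to the agent;
        # (True, <result>) lets execution proceed with the (possibly rewritten) result.
        if len(output.raw) < 100:
            return False, "Output shorter than 100 characters; please elaborate."
        return True, output.raw

    task = Task(description="Write a brief", expected_output="A brief", guardrail=min_length_guardrail)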
@@ -171,8 +176,13 @@ class Task(BaseModel):
         # Check return annotation if present, but don't require it
         return_annotation = sig.return_annotation
         if return_annotation != inspect.Signature.empty:
-            if not (return_annotation == Tuple[bool, Any] or str(return_annotation) == 'Tuple[bool, Any]'):
-                raise ValueError("If return type is annotated, it must be Tuple[bool, Any]")
+            if not (
+                return_annotation == Tuple[bool, Any]
+                or str(return_annotation) == "Tuple[bool, Any]"
+            ):
+                raise ValueError(
+                    "If return type is annotated, it must be Tuple[bool, Any]"
+                )
         return v
 
     _telemetry: Telemetry = PrivateAttr(default_factory=Telemetry)
@@ -181,7 +191,6 @@ class Task(BaseModel):
     _original_expected_output: Optional[str] = PrivateAttr(default=None)
     _original_output_file: Optional[str] = PrivateAttr(default=None)
     _thread: Optional[threading.Thread] = PrivateAttr(default=None)
-    _execution_time: Optional[float] = PrivateAttr(default=None)
 
     @model_validator(mode="before")
     @classmethod
@@ -206,25 +215,19 @@ class Task(BaseModel):
             "may_not_set_field", "This field is not to be set by the user.", {}
         )
 
-    def _set_start_execution_time(self) -> float:
-        return datetime.datetime.now().timestamp()
-
-    def _set_end_execution_time(self, start_time: float) -> None:
-        self._execution_time = datetime.datetime.now().timestamp() - start_time
-
     @field_validator("output_file")
     @classmethod
     def output_file_validation(cls, value: Optional[str]) -> Optional[str]:
         """Validate the output file path.
 
         Args:
             value: The output file path to validate. Can be None or a string.
                 If the path contains template variables (e.g. {var}), leading slashes are preserved.
                 For regular paths, leading slashes are stripped.
 
         Returns:
             The validated and potentially modified path, or None if no path was provided.
 
         Raises:
             ValueError: If the path contains invalid characters, path traversal attempts,
                 or other security concerns.
@@ -234,18 +237,24 @@ class Task(BaseModel):
 
         # Basic security checks
         if ".." in value:
-            raise ValueError("Path traversal attempts are not allowed in output_file paths")
+            raise ValueError(
+                "Path traversal attempts are not allowed in output_file paths"
+            )
 
         # Check for shell expansion first
-        if value.startswith('~') or value.startswith('$'):
-            raise ValueError("Shell expansion characters are not allowed in output_file paths")
+        if value.startswith("~") or value.startswith("$"):
+            raise ValueError(
+                "Shell expansion characters are not allowed in output_file paths"
+            )
 
         # Then check other shell special characters
-        if any(char in value for char in ['|', '>', '<', '&', ';']):
-            raise ValueError("Shell special characters are not allowed in output_file paths")
+        if any(char in value for char in ["|", ">", "<", "&", ";"]):
+            raise ValueError(
+                "Shell special characters are not allowed in output_file paths"
+            )
 
         # Don't strip leading slash if it's a template path with variables
         if "{" in value or "}" in value:
             # Validate template variable format
             template_vars = [part.split("}")[0] for part in value.split("{")[1:]]
             for var in template_vars:
@@ -302,6 +311,12 @@ class Task(BaseModel):
 
         return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
 
+    @property
+    def execution_duration(self) -> float | None:
+        if not self.start_time or not self.end_time:
+            return None
+        return (self.end_time - self.start_time).total_seconds()
+
     def execute_async(
         self,
         agent: BaseAgent | None = None,
@@ -342,7 +357,7 @@ class Task(BaseModel):
             f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
         )
 
-        start_time = self._set_start_execution_time()
+        self.start_time = datetime.datetime.now()
         self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self)
 
         self.prompt_context = context
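Review note: timing now lives on the model as start_time/end_time fields, with the duration derived on demand by the execution_duration property instead of the removed private _execution_time float. A sketch, assuming a synchronous execution path populates both timestamps:

    task.execute_sync(agent=some_agent)
    if task.execution_duration is not None:
        print(f"Task finished in {task.execution_duration:.2f}s")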
@@ -378,10 +393,14 @@ class Task(BaseModel):
             )
 
             self.retry_count += 1
-            context = (
-                f"### Previous attempt failed validation: {guardrail_result.error}\n\n\n"
-                f"### Previous result:\n{task_output.raw}\n\n\n"
-                "Try again, making sure to address the validation error."
+            context = self.i18n.errors("validation_error").format(
+                guardrail_result_error=guardrail_result.error,
+                task_output=task_output.raw,
+            )
+            printer = Printer()
+            printer.print(
+                content=f"Guardrail blocked, retrying, due to: {guardrail_result.error}\n",
+                color="yellow",
             )
             return self._execute_core(agent, context, tools)
 
@@ -392,18 +411,24 @@ class Task(BaseModel):
 
         if isinstance(guardrail_result.result, str):
             task_output.raw = guardrail_result.result
-            pydantic_output, json_output = self._export_output(guardrail_result.result)
+            pydantic_output, json_output = self._export_output(
+                guardrail_result.result
+            )
             task_output.pydantic = pydantic_output
             task_output.json_dict = json_output
         elif isinstance(guardrail_result.result, TaskOutput):
             task_output = guardrail_result.result
 
         self.output = task_output
+        self.end_time = datetime.datetime.now()
 
-        self._set_end_execution_time(start_time)
         if self.callback:
             self.callback(self.output)
 
+        crew = self.agent.crew  # type: ignore[union-attr]
+        if crew and crew.task_callback and crew.task_callback != self.callback:
+            crew.task_callback(self.output)
+
         if self._execution_span:
             self._telemetry.task_ended(self._execution_span, self, agent.crew)
             self._execution_span = None
@@ -412,7 +437,9 @@ class Task(BaseModel):
         content = (
             json_output
             if json_output
-            else pydantic_output.model_dump_json() if pydantic_output else result
+            else pydantic_output.model_dump_json()
+            if pydantic_output
+            else result
         )
         self._save_file(content)
 
@@ -432,13 +459,16 @@ class Task(BaseModel):
         tasks_slices = [self.description, output]
         return "\n".join(tasks_slices)
 
-    def interpolate_inputs(self, inputs: Dict[str, Union[str, int, float]]) -> None:
+    def interpolate_inputs_and_add_conversation_history(
+        self, inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]]
+    ) -> None:
         """Interpolate inputs into the task description, expected output, and output file path.
+        Add conversation history if present.
 
         Args:
             inputs: Dictionary mapping template variables to their values.
                 Supported value types are strings, integers, and floats.
 
         Raises:
             ValueError: If a required template variable is missing from inputs.
         """
@@ -455,7 +485,9 @@ class Task(BaseModel):
         try:
             self.description = self._original_description.format(**inputs)
         except KeyError as e:
-            raise ValueError(f"Missing required template variable '{e.args[0]}' in description") from e
+            raise ValueError(
+                f"Missing required template variable '{e.args[0]}' in description"
+            ) from e
         except ValueError as e:
             raise ValueError(f"Error interpolating description: {str(e)}") from e
 
@@ -472,39 +504,86 @@ class Task(BaseModel):
                     input_string=self._original_output_file, inputs=inputs
                 )
             except (KeyError, ValueError) as e:
-                raise ValueError(f"Error interpolating output_file path: {str(e)}") from e
+                raise ValueError(
+                    f"Error interpolating output_file path: {str(e)}"
+                ) from e
 
-    def interpolate_only(self, input_string: Optional[str], inputs: Dict[str, Union[str, int, float]]) -> str:
+        if "crew_chat_messages" in inputs and inputs["crew_chat_messages"]:
+            conversation_instruction = self.i18n.slice(
+                "conversation_history_instruction"
+            )
+
+            crew_chat_messages_json = str(inputs["crew_chat_messages"])
+
+            try:
+                crew_chat_messages = json.loads(crew_chat_messages_json)
+            except json.JSONDecodeError as e:
+                print("An error occurred while parsing crew chat messages:", e)
+                raise
+
+            conversation_history = "\n".join(
+                f"{msg['role'].capitalize()}: {msg['content']}"
+                for msg in crew_chat_messages
+                if isinstance(msg, dict) and "role" in msg and "content" in msg
+            )
+
+            self.description += (
+                f"\n\n{conversation_instruction}\n\n{conversation_history}"
+            )
+
+    def interpolate_only(
+        self,
+        input_string: Optional[str],
+        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]],
+    ) -> str:
         """Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched.
 
         Args:
             input_string: The string containing template variables to interpolate.
                 Can be None or empty, in which case an empty string is returned.
             inputs: Dictionary mapping template variables to their values.
-                Supported value types are strings, integers, and floats.
-                If input_string is empty or has no placeholders, inputs can be empty.
+                Supported value types are strings, integers, floats, and dicts/lists
+                containing only these types and other nested dicts/lists.
 
         Returns:
             The interpolated string with all template variables replaced with their values.
             Empty string if input_string is None or empty.
 
         Raises:
-            ValueError: If a required template variable is missing from inputs.
-            KeyError: If a template variable is not found in the inputs dictionary.
+            ValueError: If a value contains unsupported types
         """
+
+        # Validation function for recursive type checking
+        def validate_type(value: Any) -> None:
+            if value is None:
+                return
+            if isinstance(value, (str, int, float, bool)):
+                return
+            if isinstance(value, (dict, list)):
+                for item in value.values() if isinstance(value, dict) else value:
+                    validate_type(item)
+                return
+            raise ValueError(
+                f"Unsupported type {type(value).__name__} in inputs. "
+                "Only str, int, float, bool, dict, and list are allowed."
+            )
+
+        # Validate all input values
+        for key, value in inputs.items():
+            try:
+                validate_type(value)
+            except ValueError as e:
+                raise ValueError(f"Invalid value for key '{key}': {str(e)}") from e
+
         if input_string is None or not input_string:
             return ""
         if "{" not in input_string and "}" not in input_string:
             return input_string
         if not inputs:
-            raise ValueError("Inputs dictionary cannot be empty when interpolating variables")
+            raise ValueError(
+                "Inputs dictionary cannot be empty when interpolating variables"
+            )
         try:
-            # Validate input types
-            for key, value in inputs.items():
-                if not isinstance(value, (str, int, float)):
-                    raise ValueError(f"Value for key '{key}' must be a string, integer, or float, got {type(value).__name__}")
-
             escaped_string = input_string.replace("{", "{{").replace("}", "}}")
 
             for key in inputs.keys():
@@ -512,7 +591,9 @@ class Task(BaseModel):
 
             return escaped_string.format(**inputs)
         except KeyError as e:
-            raise KeyError(f"Template variable '{e.args[0]}' not found in inputs dictionary") from e
+            raise KeyError(
+                f"Template variable '{e.args[0]}' not found in inputs dictionary"
+            ) from e
         except ValueError as e:
             raise ValueError(f"Error during string interpolation: {str(e)}") from e
 
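Review note: interpolate_only now validates inputs recursively before formatting, so nested dicts/lists of primitives pass while arbitrary objects fail fast with the offending key named. Illustrative behavior:

    task.interpolate_only("Report for {user}", {"user": {"name": "Ada", "tags": ["vip"]}})
    # OK: a nested dict/list of str/int/float/bool passes validate_type

    task.interpolate_only("Report for {user}", {"user": object()})
    # ValueError: Invalid value for key 'user': Unsupported type object in inputs. ...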
@@ -527,37 +608,56 @@ class Task(BaseModel):
         self.delegations += 1
 
     def copy(
+        self,
+        *,
+        include: Optional[AbstractSet[int] | AbstractSet[str] | Mapping[int, Any] | Mapping[str, Any]] = None,
+        exclude: Optional[AbstractSet[int] | AbstractSet[str] | Mapping[int, Any] | Mapping[str, Any]] = None,
+        update: Optional[Dict[str, Any]] = None,
+        deep: bool = False,
+    ) -> "Task":
+        """Create a copy of the Task."""
+        exclude_set = {"id", "agent", "context", "tools"}
+        if exclude:
+            if isinstance(exclude, (AbstractSet, set)):
+                exclude_set.update(str(x) for x in exclude)
+            elif isinstance(exclude, Mapping):
+                exclude_set.update(str(x) for x in exclude.keys())
+
+        copied_task = super().copy(
+            include=include,
+            exclude=exclude_set,
+            update=update,
+            deep=deep,
+        )
+
+        copied_task.id = uuid.uuid4()
+        copied_task.agent = None
+        copied_task.context = None
+        copied_task.tools = []
+
+        return copied_task
+
+    def copy_with_agents(
         self, agents: List["BaseAgent"], task_mapping: Dict[str, "Task"]
     ) -> "Task":
-        """Create a deep copy of the Task."""
-        exclude = {
-            "id",
-            "agent",
-            "context",
-            "tools",
-        }
-
-        copied_data = self.model_dump(exclude=exclude)
-        copied_data = {k: v for k, v in copied_data.items() if v is not None}
-
-        cloned_context = (
-            [task_mapping[context_task.key] for context_task in self.context]
-            if self.context
-            else None
-        )
+        """Create a copy of the Task with agent references."""
+        copied_task = self.copy()
 
         def get_agent_by_role(role: str) -> Union["BaseAgent", None]:
             return next((agent for agent in agents if agent.role == role), None)
 
-        cloned_agent = get_agent_by_role(self.agent.role) if self.agent else None
-        cloned_tools = copy(self.tools) if self.tools else []
-
-        copied_task = Task(
-            **copied_data,
-            context=cloned_context,
-            agent=cloned_agent,
-            tools=cloned_tools,
-        )
+        if self.agent:
+            copied_task.agent = get_agent_by_role(self.agent.role)
+
+        if self.context:
+            copied_task.context = [
+                task_mapping[context_task.key]
+                for context_task in self.context
+                if context_task.key in task_mapping
+            ]
+
+        if self.tools:
+            copied_task.tools = copy(self.tools)
 
         return copied_task
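Review note: Task.copy now always strips identity and relational state, so a copy never aliases the original's id, agent, context, or tools; copy_with_agents then re-binds agents by role from the supplied list. A quick sketch:

    clone = original_task.copy()
    assert clone.id != original_task.id
    assert clone.agent is None and clone.tools == []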
@@ -595,19 +695,32 @@ class Task(BaseModel):
             return OutputFormat.PYDANTIC
         return OutputFormat.RAW
 
-    def _save_file(self, result: Any) -> None:
+    def _save_file(self, result: Union[Dict, str, Any]) -> None:
         """Save task output to a file.
 
+        Note:
+            For cross-platform file writing, especially on Windows, consider using FileWriterTool
+            from the crewai_tools package:
+                pip install 'crewai[tools]'
+                from crewai_tools import FileWriterTool
+
         Args:
             result: The result to save to the file. Can be a dict or any stringifiable object.
 
         Raises:
             ValueError: If output_file is not set
-            RuntimeError: If there is an error writing to the file
+            RuntimeError: If there is an error writing to the file. For cross-platform
+                compatibility, especially on Windows, use FileWriterTool from crewai_tools
+                package.
         """
         if self.output_file is None:
             raise ValueError("output_file is not set.")
 
+        FILEWRITER_RECOMMENDATION = (
+            "For cross-platform file writing, especially on Windows, "
+            "use FileWriterTool from crewai_tools package."
+        )
+
         try:
             resolved_path = Path(self.output_file).expanduser().resolve()
             directory = resolved_path.parent
@@ -618,11 +731,17 @@ class Task(BaseModel):
             with resolved_path.open("w", encoding="utf-8") as file:
                 if isinstance(result, dict):
                     import json
 
                     json.dump(result, file, ensure_ascii=False, indent=2)
                 else:
                     file.write(str(result))
         except (OSError, IOError) as e:
-            raise RuntimeError(f"Failed to save output file: {e}")
+            raise RuntimeError(
+                "\n".join([
+                    f"Failed to save output file: {e}",
+                    FILEWRITER_RECOMMENDATION
+                ])
+            )
         return None
 
     def __repr__(self):
@@ -1,7 +1,9 @@
 import json
-from typing import Any, Dict, Optional
+from typing import Any, Callable, Dict, Optional
 
 from pydantic import BaseModel, Field, model_validator
+from pydantic.main import IncEx
+from typing_extensions import Literal
 
 from crewai.tasks.output_format import OutputFormat
 
@@ -34,8 +36,8 @@ class TaskOutput(BaseModel):
         self.summary = f"{excerpt}..."
         return self
 
-    @property
-    def json(self) -> Optional[str]:
+    def model_json(self) -> str:
+        """Get the JSON representation of the output."""
         if self.output_format != OutputFormat.JSON:
             raise ValueError(
                 """
@@ -44,8 +46,37 @@ class TaskOutput(BaseModel):
                 please make sure to set the output_json property for the task
                 """
             )
+        return json.dumps(self.json_dict) if self.json_dict else "{}"
 
-        return json.dumps(self.json_dict)
+    def model_dump_json(
+        self,
+        *,
+        indent: Optional[int] = None,
+        include: Optional[IncEx] = None,
+        exclude: Optional[IncEx] = None,
+        context: Optional[Any] = None,
+        by_alias: bool = False,
+        exclude_unset: bool = False,
+        exclude_defaults: bool = False,
+        exclude_none: bool = False,
+        round_trip: bool = False,
+        warnings: bool | Literal["none", "warn", "error"] = False,
+        serialize_as_any: bool = False,
+    ) -> str:
+        """Override model_dump_json to handle custom JSON output."""
+        return super().model_dump_json(
+            indent=indent,
+            include=include,
+            exclude=exclude,
+            context=context,
+            by_alias=by_alias,
+            exclude_unset=exclude_unset,
+            exclude_defaults=exclude_defaults,
+            exclude_none=exclude_none,
+            round_trip=round_trip,
+            warnings=warnings,
+            serialize_as_any=serialize_as_any,
+        )
 
     def to_dict(self) -> Dict[str, Any]:
         """Convert json_output and pydantic_output to a dictionary."""
@@ -7,11 +7,11 @@ from crewai.utilities import I18N
 
 i18n = I18N()
 
 
 class AddImageToolSchema(BaseModel):
     image_url: str = Field(..., description="The URL or path of the image to add")
     action: Optional[str] = Field(
-        default=None,
-        description="Optional context or question about the image"
+        default=None, description="Optional context or question about the image"
     )
 
 
@@ -36,10 +36,7 @@ class AddImageTool(BaseTool):
                 "image_url": {
                     "url": image_url,
                 },
-            }
+            },
         ]
 
-        return {
-            "role": "user",
-            "content": content
-        }
+        return {"role": "user", "content": content}
@@ -1,5 +1,5 @@
 import logging
-from typing import Optional, Union
+from typing import Optional
 
 from pydantic import Field
 
@@ -54,12 +54,12 @@ class BaseAgentTool(BaseTool):
     ) -> str:
         """
         Execute delegation to an agent with case-insensitive and whitespace-tolerant matching.
 
         Args:
             agent_name: Name/role of the agent to delegate to (case-insensitive)
             task: The specific question or task to delegate
             context: Optional additional context for the task execution
 
         Returns:
             str: The execution result from the delegated agent or an error message
                 if the agent cannot be found
@@ -82,12 +82,12 @@ class BaseAgentTool(BaseTool):
             available_agents = [agent.role for agent in self.agents]
             logger.debug(f"Available agents: {available_agents}")
 
-            agent = [  # type: ignore # Incompatible types in assignment (expression has type "list[BaseAgent]", variable has type "str | None")
+            matching_agents = [
                 available_agent
                 for available_agent in self.agents
                 if self.sanitize_agent_name(available_agent.role) == sanitized_name
             ]
-            logger.debug(f"Found {len(agent)} matching agents for role '{sanitized_name}'")
+            logger.debug(f"Found {len(matching_agents)} matching agents for role '{sanitized_name}'")
         except (AttributeError, ValueError) as e:
             # Handle specific exceptions that might occur during role name processing
             return self.i18n.errors("agent_tool_unexisting_coworker").format(
@@ -97,7 +97,7 @@ class BaseAgentTool(BaseTool):
                 error=str(e)
             )
 
-        if not agent:
+        if not matching_agents:
             # No matching agent found after sanitization
             return self.i18n.errors("agent_tool_unexisting_coworker").format(
                 coworkers="\n".join(
@@ -106,19 +106,19 @@ class BaseAgentTool(BaseTool):
                 error=f"No agent found with role '{sanitized_name}'"
             )
 
-        agent = agent[0]
+        selected_agent = matching_agents[0]
         try:
             task_with_assigned_agent = Task(
                 description=task,
-                agent=agent,
-                expected_output=agent.i18n.slice("manager_request"),
-                i18n=agent.i18n,
+                agent=selected_agent,
+                expected_output=selected_agent.i18n.slice("manager_request"),
+                i18n=selected_agent.i18n,
             )
-            logger.debug(f"Created task for agent '{self.sanitize_agent_name(agent.role)}': {task}")
-            return agent.execute_task(task_with_assigned_agent, context)
+            logger.debug(f"Created task for agent '{self.sanitize_agent_name(selected_agent.role)}': {task}")
+            return selected_agent.execute_task(task_with_assigned_agent, context)
         except Exception as e:
             # Handle task creation or execution errors
             return self.i18n.errors("agent_tool_execution_error").format(
-                agent_role=self.sanitize_agent_name(agent.role),
+                agent_role=self.sanitize_agent_name(selected_agent.role),
                 error=str(e)
             )
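Review note: renaming the reused agent variable to matching_agents/selected_agent removes the earlier type: ignore and keeps the list and the single selected agent distinct. The matching itself remains case- and whitespace-insensitive via sanitize_agent_name; illustratively, both calls below would delegate to the same coworker (the _execute signature is assumed from the docstring above):

    tool._execute(agent_name="Senior Researcher ", task="Find sources", context=None)
    tool._execute(agent_name="senior researcher", task="Find sources", context=None)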
@@ -1,29 +1,36 @@
 from abc import ABC, abstractmethod
 from inspect import signature
-from typing import Any, Callable, Type, get_args, get_origin
+from typing import Any, Callable, Dict, Optional, Type, Tuple, get_args, get_origin

 from pydantic import BaseModel, ConfigDict, Field, create_model, validator
+from pydantic.fields import FieldInfo
 from pydantic import BaseModel as PydanticBaseModel

 from crewai.tools.structured_tool import CrewStructuredTool


+def _create_model_fields(fields: Dict[str, Tuple[Any, FieldInfo]]) -> Dict[str, Any]:
+    """Helper function to create model fields with proper type hints."""
+    return {name: (annotation, field) for name, (annotation, field) in fields.items()}
+
+
 class BaseTool(BaseModel, ABC):
+    """Base class for all tools."""

     class _ArgsSchemaPlaceholder(PydanticBaseModel):
         pass

-    model_config = ConfigDict()
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+    func: Optional[Callable] = None

     name: str
     """The unique name of the tool that clearly communicates its purpose."""
     description: str
     """Used to tell the model how/when/why to use the tool."""
-    args_schema: Type[PydanticBaseModel] = Field(default_factory=_ArgsSchemaPlaceholder)
+    args_schema: Type[PydanticBaseModel] = Field(default=_ArgsSchemaPlaceholder)
     """The schema for the arguments that the tool accepts."""
     description_updated: bool = False
     """Flag to check if the description has been updated."""
     cache_function: Callable = lambda _args=None, _result=None: True
-    """Function that will be used to determine if the tool should be cached, should return a boolean. If None, the tool will be cached."""
+    """Function that will be used to determine if the tool should be cached."""
     result_as_answer: bool = False
     """Flag to check if the tool should be the final agent answer."""

@@ -46,7 +53,6 @@ class BaseTool(BaseModel, ABC):

     def model_post_init(self, __context: Any) -> None:
         self._generate_description()
-
         super().model_post_init(__context)

     def run(
@@ -76,50 +82,7 @@ class BaseTool(BaseModel, ABC):
             result_as_answer=self.result_as_answer,
         )

-    @classmethod
-    def from_langchain(cls, tool: Any) -> "BaseTool":
-        """Create a Tool instance from a CrewStructuredTool.
-
-        This method takes a CrewStructuredTool object and converts it into a
-        Tool instance. It ensures that the provided tool has a callable 'func'
-        attribute and infers the argument schema if not explicitly provided.
-        """
-        if not hasattr(tool, "func") or not callable(tool.func):
-            raise ValueError("The provided tool must have a callable 'func' attribute.")
-
-        args_schema = getattr(tool, "args_schema", None)
-
-        if args_schema is None:
-            # Infer args_schema from the function signature if not provided
-            func_signature = signature(tool.func)
-            annotations = func_signature.parameters
-            args_fields = {}
-            for name, param in annotations.items():
-                if name != "self":
-                    param_annotation = (
-                        param.annotation if param.annotation != param.empty else Any
-                    )
-                    field_info = Field(
-                        default=...,
-                        description="",
-                    )
-                    args_fields[name] = (param_annotation, field_info)
-            if args_fields:
-                args_schema = create_model(f"{tool.name}Input", **args_fields)
-            else:
-                # Create a default schema with no fields if no parameters are found
-                args_schema = create_model(
-                    f"{tool.name}Input", __base__=PydanticBaseModel
-                )
-
-        return cls(
-            name=getattr(tool, "name", "Unnamed Tool"),
-            description=getattr(tool, "description", ""),
-            func=tool.func,
-            args_schema=args_schema,
-        )
-
-    def _set_args_schema(self):
+    def _set_args_schema(self) -> None:
         if self.args_schema is None:
             class_name = f"{self.__class__.__name__}Schema"
             self.args_schema = type(
@@ -134,7 +97,7 @@ class BaseTool(BaseModel, ABC):
             },
         )

-    def _generate_description(self):
+    def _generate_description(self) -> None:
         args_schema = {
             name: {
                 "description": field.description,
@@ -168,79 +131,25 @@ class BaseTool(BaseModel, ABC):


 class Tool(BaseTool):
-    """The function that will be executed when the tool is called."""
+    """Tool class that wraps a function."""

     func: Callable
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    def __init__(self, **kwargs):
+        if "func" not in kwargs:
+            raise ValueError("Tool requires a 'func' argument")
+        super().__init__(**kwargs)

     def _run(self, *args: Any, **kwargs: Any) -> Any:
         return self.func(*args, **kwargs)

-    @classmethod
-    def from_langchain(cls, tool: Any) -> "Tool":
-        """Create a Tool instance from a CrewStructuredTool.
-
-        This method takes a CrewStructuredTool object and converts it into a
-        Tool instance. It ensures that the provided tool has a callable 'func'
-        attribute and infers the argument schema if not explicitly provided.
-
-        Args:
-            tool (Any): The CrewStructuredTool object to be converted.
-
-        Returns:
-            Tool: A new Tool instance created from the provided CrewStructuredTool.
-
-        Raises:
-            ValueError: If the provided tool does not have a callable 'func' attribute.
-        """
-        if not hasattr(tool, "func") or not callable(tool.func):
-            raise ValueError("The provided tool must have a callable 'func' attribute.")
-
-        args_schema = getattr(tool, "args_schema", None)
-
-        if args_schema is None:
-            # Infer args_schema from the function signature if not provided
-            func_signature = signature(tool.func)
-            annotations = func_signature.parameters
-            args_fields = {}
-            for name, param in annotations.items():
-                if name != "self":
-                    param_annotation = (
-                        param.annotation if param.annotation != param.empty else Any
-                    )
-                    field_info = Field(
-                        default=...,
-                        description="",
-                    )
-                    args_fields[name] = (param_annotation, field_info)
-            if args_fields:
-                args_schema = create_model(f"{tool.name}Input", **args_fields)
-            else:
-                # Create a default schema with no fields if no parameters are found
-                args_schema = create_model(
-                    f"{tool.name}Input", __base__=PydanticBaseModel
-                )
-
-        return cls(
-            name=getattr(tool, "name", "Unnamed Tool"),
-            description=getattr(tool, "description", ""),
-            func=tool.func,
-            args_schema=args_schema,
-        )
-
-
-def to_langchain(
-    tools: list[BaseTool | CrewStructuredTool],
-) -> list[CrewStructuredTool]:
-    return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools]
-
-
-def tool(*args):
-    """
-    Decorator to create a tool from a function.
-    """
+def tool(*args: Any) -> Any:
+    """Decorator to create a tool from a function."""

     def _make_with_name(tool_name: str) -> Callable:
-        def _make_tool(f: Callable) -> BaseTool:
+        def _make_tool(f: Callable) -> Tool:
             if f.__doc__ is None:
                 raise ValueError("Function must have a docstring")
             if f.__annotations__ is None:
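
**Note on the `Tool`/`tool` changes above:** `Tool` now fails fast at construction when no `func` is supplied, and the decorator's inner `_make_tool` is typed to return `Tool`. A minimal sketch of how this surface is typically exercised; the `crewai.tools` re-export and the `run(**kwargs)` call are assumptions, not shown in this diff:

```python
from crewai.tools import tool  # assumed re-export; the diff only shows base_tool.py


@tool("Adder")
def add(a: int, b: int) -> int:
    """Add two integers and return their sum."""  # _make_tool raises ValueError without a docstring
    return a + b


print(add.run(a=2, b=3))  # BaseTool.run dispatches to the wrapped function -> 5
```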
@@ -2,9 +2,14 @@ from __future__ import annotations

 import inspect
 import textwrap
-from typing import Any, Callable, Optional, Union, get_type_hints
+from typing import Any, Callable, Dict, Optional, Tuple, Union, get_type_hints

-from pydantic import BaseModel, Field, create_model
+from pydantic import BaseModel, ConfigDict, Field, create_model
+from pydantic.fields import FieldInfo

+def _create_model_fields(fields: Dict[str, Tuple[Any, FieldInfo]]) -> Dict[str, Any]:
+    """Helper function to create model fields with proper type hints."""
+    return {name: (annotation, field) for name, (annotation, field) in fields.items()}

 from crewai.utilities.logger import Logger

@@ -142,7 +147,8 @@ class CrewStructuredTool:

         # Create model
         schema_name = f"{name.title()}Schema"
-        return create_model(schema_name, **fields)
+        model_fields = _create_model_fields(fields)
+        return create_model(schema_name, __base__=BaseModel, **model_fields)

     def _validate_function_signature(self) -> None:
         """Validate that the function signature matches the args schema."""
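
**Note:** the new `_create_model_fields` helper is essentially a typed pass-through; what matters is the shape of the mapping it hands to `pydantic.create_model`. An illustrative sketch with hypothetical field names:

```python
from typing import Any, Dict, Tuple

from pydantic import BaseModel, Field, create_model
from pydantic.fields import FieldInfo

# Each value is an (annotation, FieldInfo) pair, exactly what create_model expects.
fields: Dict[str, Tuple[Any, FieldInfo]] = {
    "query": (str, Field(..., description="Search query")),
    "limit": (int, Field(10, description="Maximum number of results")),
}

SearchSchema = create_model("SearchSchema", __base__=BaseModel, **fields)
print(SearchSchema(query="crewai").model_dump())  # {'query': 'crewai', 'limit': 10}
```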
@@ -1,9 +1,14 @@
 import ast
 import datetime
+import json
 import time
 from difflib import SequenceMatcher
+from json import JSONDecodeError
 from textwrap import dedent
-from typing import Any, List, Union
+from typing import Any, Dict, List, Optional, Union

+import json5
+from json_repair import repair_json
+
 import crewai.utilities.events as events
 from crewai.agents.tools_handler import ToolsHandler
@@ -19,7 +24,15 @@ try:
     import agentops  # type: ignore
 except ImportError:
     agentops = None
-OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o", "o1-preview", "o1-mini", "o1", "o3", "o3-mini"]
+OPENAI_BIGGER_MODELS = [
+    "gpt-4",
+    "gpt-4o",
+    "o1-preview",
+    "o1-mini",
+    "o1",
+    "o3",
+    "o3-mini",
+]


 class ToolUsageErrorException(Exception):
@@ -80,7 +93,7 @@ class ToolUsage:
             self._max_parsing_attempts = 2
             self._remember_format_after_usages = 4

-    def parse(self, tool_string: str):
+    def parse_tool_calling(self, tool_string: str):
         """Parse the tool string and return the tool calling."""
         return self._tool_calling(tool_string)

@@ -94,7 +107,6 @@ class ToolUsage:
             self.task.increment_tools_errors()
             return error

-        # BUG? The code below seems to be unreachable
         try:
             tool = self._select_tool(calling.tool_name)
         except Exception as e:
@@ -116,7 +128,7 @@ class ToolUsage:
             self._printer.print(content=f"\n\n{error}\n", color="red")
             return error

-        return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"  # type: ignore # BUG?: "_use" of "ToolUsage" does not return a value (it only ever returns None)
+        return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"

     def _use(
         self,
@@ -169,7 +181,7 @@ class ToolUsage:

         if calling.arguments:
             try:
-                acceptable_args = tool.args_schema.schema()["properties"].keys()  # type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "schema"
+                acceptable_args = tool.args_schema.model_json_schema()["properties"].keys()  # type: ignore
                 arguments = {
                     k: v
                     for k, v in calling.arguments.items()
@@ -349,13 +361,13 @@ class ToolUsage:
         tool_name = self.action.tool
         tool = self._select_tool(tool_name)
         try:
-            tool_input = self._validate_tool_input(self.action.tool_input)
-            arguments = ast.literal_eval(tool_input)
+            arguments = self._validate_tool_input(self.action.tool_input)
         except Exception:
             if raise_error:
                 raise
             else:
-                return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
+                return ToolUsageErrorException(
                     f'{self._i18n.errors("tool_arguments_error")}'
                 )

@@ -363,14 +375,14 @@ class ToolUsage:
             if raise_error:
                 raise
             else:
-                return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
+                return ToolUsageErrorException(
                    f'{self._i18n.errors("tool_arguments_error")}'
                 )

         return ToolCalling(
             tool_name=tool.name,
             arguments=arguments,
-            log=tool_string,  # type: ignore
+            log=tool_string,
         )

     def _tool_calling(
@@ -396,57 +408,55 @@ class ToolUsage:
             )
             return self._tool_calling(tool_string)

-    def _validate_tool_input(self, tool_input: str) -> str:
+    def _validate_tool_input(self, tool_input: Optional[str]) -> Dict[str, Any]:
+        if tool_input is None:
+            return {}
+
+        if not isinstance(tool_input, str) or not tool_input.strip():
+            raise Exception(
+                "Tool input must be a valid dictionary in JSON or Python literal format"
+            )
+
+        # Attempt 1: Parse as JSON
         try:
-            ast.literal_eval(tool_input)
-            return tool_input
-        except Exception:
-            # Clean and ensure the string is properly enclosed in braces
-            tool_input = tool_input.strip()
-            if not tool_input.startswith("{"):
-                tool_input = "{" + tool_input
-            if not tool_input.endswith("}"):
-                tool_input += "}"
-
-            # Manually split the input into key-value pairs
-            entries = tool_input.strip("{} ").split(",")
-            formatted_entries = []
-
-            for entry in entries:
-                if ":" not in entry:
-                    continue  # Skip malformed entries
-                key, value = entry.split(":", 1)
-
-                # Remove extraneous white spaces and quotes, replace single quotes
-                key = key.strip().strip('"').replace("'", '"')
-                value = value.strip()
-
-                # Handle replacement of single quotes at the start and end of the value string
-                if value.startswith("'") and value.endswith("'"):
-                    value = value[1:-1]  # Remove single quotes
-                    value = (
-                        '"' + value.replace('"', '\\"') + '"'
-                    )  # Re-encapsulate with double quotes
-                elif value.isdigit():  # Check if value is a digit, hence integer
-                    value = value
-                elif value.lower() in [
-                    "true",
-                    "false",
-                ]:  # Check for boolean and null values
-                    value = value.lower().capitalize()
-                elif value.lower() == "null":
-                    value = "None"
-                else:
-                    # Assume the value is a string and needs quotes
-                    value = '"' + value.replace('"', '\\"') + '"'
-
-                # Rebuild the entry with proper quoting
-                formatted_entry = f'"{key}": {value}'
-                formatted_entries.append(formatted_entry)
-
-            # Reconstruct the JSON string
-            new_json_string = "{" + ", ".join(formatted_entries) + "}"
-            return new_json_string
+            arguments = json.loads(tool_input)
+            if isinstance(arguments, dict):
+                return arguments
+        except (JSONDecodeError, TypeError):
+            pass  # Continue to the next parsing attempt
+
+        # Attempt 2: Parse as Python literal
+        try:
+            arguments = ast.literal_eval(tool_input)
+            if isinstance(arguments, dict):
+                return arguments
+        except (ValueError, SyntaxError):
+            pass  # Continue to the next parsing attempt
+
+        # Attempt 3: Parse as JSON5
+        try:
+            arguments = json5.loads(tool_input)
+            if isinstance(arguments, dict):
+                return arguments
+        except (JSONDecodeError, ValueError, TypeError):
+            pass  # Continue to the next parsing attempt
+
+        # Attempt 4: Repair JSON
+        try:
+            repaired_input = repair_json(tool_input)
+            self._printer.print(
+                content=f"Repaired JSON: {repaired_input}", color="blue"
+            )
+            arguments = json.loads(repaired_input)
+            if isinstance(arguments, dict):
+                return arguments
+        except Exception as e:
+            self._printer.print(content=f"Failed to repair JSON: {e}", color="red")
+
+        # If all parsing attempts fail, raise an error
+        raise Exception(
+            "Tool input must be a valid dictionary in JSON or Python literal format"
+        )

     def on_tool_error(self, tool: Any, tool_calling: ToolCalling, e: Exception) -> None:
         event_data = self._prepare_event_data(tool, tool_calling)
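
**Note on `_validate_tool_input`:** the rewrite replaces the old hand-rolled key/value re-quoting with four increasingly tolerant parsers. A standalone sketch of the same cascade (requires the `json5` and `json-repair` packages that the new imports pull in):

```python
import ast
import json

import json5
from json_repair import repair_json


def parse_tool_input(tool_input: str) -> dict:
    """Try strict JSON, then Python literals, then JSON5, then repaired JSON."""
    parsers = (
        json.loads,                            # 1: strict JSON
        ast.literal_eval,                      # 2: Python dict literal, e.g. {'a': 1}
        json5.loads,                           # 3: lenient JSON (comments, trailing commas)
        lambda s: json.loads(repair_json(s)),  # 4: last resort, repair then parse
    )
    for parse in parsers:
        try:
            result = parse(tool_input)
            if isinstance(result, dict):
                return result
        except Exception:
            continue
    raise ValueError("Tool input must be a valid dictionary in JSON or Python literal format")


print(parse_tool_input("{'city': 'Berlin', 'days': 3,}"))  # single quotes: handled by attempt 2
```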
@@ -9,13 +9,13 @@
     "task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
     "memory": "\n\n# Useful context: \n{memory}",
     "role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
-    "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
-    "no_tools": "\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
-    "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
-    "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
-    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
+    "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
+    "no_tools": "\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
+    "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. When responding, I must use the following format:\n\n```\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n```\nThis Thought/Action/Action Input/Result can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
+    "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
+    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
     "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
-    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
+    "expected_output": "\nThis is the expected criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
     "human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
     "getting_input": "This is the agent's final answer: {final_answer}\n\n",
     "summarizer_system_message": "You are a helpful assistant that summarizes text.",
@@ -23,10 +23,12 @@
     "summary": "This is a summary of our conversation so far:\n{merged_summary}",
     "manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
     "formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
-    "human_feedback_classification": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with 'True' if further changes are needed, or 'False' if the user is satisfied. **Important** Do not include any additional commentary outside of your 'True' or 'False' response.\n\nFeedback: \"{feedback}\""
+    "human_feedback_classification": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with 'True' if further changes are needed, or 'False' if the user is satisfied. **Important** Do not include any additional commentary outside of your 'True' or 'False' response.\n\nFeedback: \"{feedback}\"",
+    "conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
+    "feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary."
   },
   "errors": {
-    "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",
+    "force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
     "force_final_answer": "Now it's time you MUST give your absolute best final answer. You'll ignore all previous instructions, stop using any tools, and just return your absolute BEST Final answer.",
     "agent_tool_unexisting_coworker": "\nError executing tool. coworker mentioned not found, it must be one of the following options:\n{coworkers}\n",
     "task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
@@ -34,14 +36,15 @@
     "tool_arguments_error": "Error: the Action Input is not a valid key, value dictionary.",
     "wrong_tool_name": "You tried to use the tool {tool}, but it doesn't exist. You must use one of the following tools, use one at time: {tools}.",
     "tool_usage_exception": "I encountered an error while trying to use the tool. This was the error: {error}.\n Tool {tool} accepts these inputs: {tool_inputs}",
-    "agent_tool_execution_error": "Error executing task with agent '{agent_role}'. Error: {error}"
+    "agent_tool_execution_error": "Error executing task with agent '{agent_role}'. Error: {error}",
+    "validation_error": "### Previous attempt failed validation: {guardrail_result_error}\n\n\n### Previous result:\n{task_output}\n\n\nTry again, making sure to address the validation error."
   },
   "tools": {
     "delegate_work": "Delegate a specific task to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to execute the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
     "ask_question": "Ask a specific question to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolute everything you know, don't reference things but instead explain them.",
     "add_image": {
       "name": "Add image to content",
-      "description": "See image to understand it's content, you can optionally ask a question about the image",
+      "description": "See image to understand its content, you can optionally ask a question about the image",
       "default_action": "Please provide a detailed description of this image, including all visual elements, context, and any notable details you can observe."
     }
   }
src/crewai/types/crew_chat.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+from typing import List
+
+from pydantic import BaseModel, Field
+
+
+class ChatInputField(BaseModel):
+    """
+    Represents a single required input for the crew, with a name and short description.
+    Example:
+    {
+        "name": "topic",
+        "description": "The topic to focus on for the conversation"
+    }
+    """
+
+    name: str = Field(..., description="The name of the input field")
+    description: str = Field(..., description="A short description of the input field")
+
+
+class ChatInputs(BaseModel):
+    """
+    Holds a high-level crew_description plus a list of ChatInputFields.
+    Example:
+    {
+        "crew_name": "topic-based-qa",
+        "crew_description": "Use this crew for topic-based Q&A",
+        "inputs": [
+            {"name": "topic", "description": "The topic to focus on"},
+            {"name": "username", "description": "Name of the user"},
+        ]
+    }
+    """
+
+    crew_name: str = Field(..., description="The name of the crew")
+    crew_description: str = Field(
+        ..., description="A description of the crew's purpose"
+    )
+    inputs: List[ChatInputField] = Field(
+        default_factory=list, description="A list of input fields for the crew"
+    )
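
**Note:** the new `crew_chat.py` types are plain Pydantic models, so they can be constructed and serialized directly. A quick sketch:

```python
from crewai.types.crew_chat import ChatInputField, ChatInputs

chat_inputs = ChatInputs(
    crew_name="topic-based-qa",
    crew_description="Use this crew for topic-based Q&A",
    inputs=[ChatInputField(name="topic", description="The topic to focus on")],
)
print(chat_inputs.model_dump_json(indent=2))
```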
@@ -4,3 +4,7 @@ DEFAULT_SCORE_THRESHOLD = 0.35
 KNOWLEDGE_DIRECTORY = "knowledge"
 MAX_LLM_RETRY = 3
 MAX_FILE_NAME_LENGTH = 255
+
+# Default embedding configuration
+DEFAULT_EMBEDDING_PROVIDER = "openai"
+DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small"
@@ -26,17 +26,24 @@ class Converter(OutputConverter):
             if self.llm.supports_function_calling():
                 return self._create_instructor().to_pydantic()
             else:
-                return self.llm.call(
+                response = self.llm.call(
                     [
                         {"role": "system", "content": self.instructions},
                         {"role": "user", "content": self.text},
                     ]
                 )
+                return self.model.model_validate_json(response)
+        except ValidationError as e:
+            if current_attempt < self.max_attempts:
+                return self.to_pydantic(current_attempt + 1)
+            raise ConverterError(
+                f"Failed to convert text into a Pydantic model due to the following validation error: {e}"
+            )
         except Exception as e:
             if current_attempt < self.max_attempts:
                 return self.to_pydantic(current_attempt + 1)
-            return ConverterError(
-                f"Failed to convert text into a pydantic model due to the following error: {e}"
+            raise ConverterError(
+                f"Failed to convert text into a Pydantic model due to the following error: {e}"
             )

     def to_json(self, current_attempt=1):
@@ -66,7 +73,6 @@ class Converter(OutputConverter):
             llm=self.llm,
             model=self.model,
             content=self.text,
-            instructions=self.instructions,
         )
         return inst

@@ -187,10 +193,15 @@ def convert_with_instructions(


 def get_conversion_instructions(model: Type[BaseModel], llm: Any) -> str:
-    instructions = "I'm gonna convert this raw text into valid JSON."
+    instructions = "Please convert the following text into valid JSON."
     if llm.supports_function_calling():
         model_schema = PydanticSchemaParser(model=model).get_schema()
-        instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
+        instructions += (
+            f"\n\nThe JSON should follow this schema:\n```json\n{model_schema}\n```"
+        )
+    else:
+        model_description = generate_model_description(model)
+        instructions += f"\n\nThe JSON should follow this format:\n{model_description}"
     return instructions


@@ -230,9 +241,13 @@ def generate_model_description(model: Type[BaseModel]) -> str:
         origin = get_origin(field_type)
         args = get_args(field_type)

-        if origin is Union and type(None) in args:
+        if origin is Union or (origin is None and len(args) > 0):
+            # Handle both Union and the new '|' syntax
             non_none_args = [arg for arg in args if arg is not type(None)]
-            return f"Optional[{describe_field(non_none_args[0])}]"
+            if len(non_none_args) == 1:
+                return f"Optional[{describe_field(non_none_args[0])}]"
+            else:
+                return f"Optional[Union[{', '.join(describe_field(arg) for arg in non_none_args)}]]"
         elif origin is list:
             return f"List[{describe_field(args[0])}]"
         elif origin is dict:
@@ -241,8 +256,10 @@ def generate_model_description(model: Type[BaseModel]) -> str:
             return f"Dict[{key_type}, {value_type}]"
         elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
             return generate_model_description(field_type)
-        else:
+        elif hasattr(field_type, "__name__"):
             return field_type.__name__
+        else:
+            return str(field_type)

     fields = model.__annotations__
     field_descriptions = [
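
**Note on the converter change:** when the LLM lacks function calling, `to_pydantic` no longer returns the raw `llm.call(...)` string; it validates it against the target model, and a failed validation now raises (and retries) instead of returning an error object. The core of that path, sketched with a hypothetical model:

```python
from pydantic import BaseModel, ValidationError


class Person(BaseModel):
    name: str
    age: int


response = '{"name": "Ada", "age": 36}'  # stand-in for self.llm.call(...)
try:
    person = Person.model_validate_json(response)  # what the new code path does
except ValidationError as exc:
    print(f"would retry, then raise ConverterError: {exc}")
```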
@@ -1,3 +1,5 @@
+"""JSON encoder for handling CrewAI specific types."""
+
 import json
 from datetime import date, datetime
 from decimal import Decimal
@@ -8,6 +10,7 @@ from pydantic import BaseModel


 class CrewJSONEncoder(json.JSONEncoder):
+    """Custom JSON encoder for CrewAI objects and special types."""
     def default(self, obj):
         if isinstance(obj, BaseModel):
             return self._handle_pydantic_model(obj)
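
**Note:** `CrewJSONEncoder` plugs into the standard library via the `cls` argument. A usage sketch; the module path is assumed from the repo layout, and datetime handling is inferred from the file's imports:

```python
import json
from datetime import datetime

from crewai.utilities.crew_json_encoder import CrewJSONEncoder  # assumed path

payload = {"started_at": datetime(2025, 1, 1, 12, 0), "status": "ok"}
print(json.dumps(payload, cls=CrewJSONEncoder))
```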
@@ -6,9 +6,10 @@ from pydantic import BaseModel, ValidationError

 from crewai.agents.parser import OutputParserException

+"""Parser for converting text outputs into Pydantic models."""

 class CrewPydanticOutputParser:
-    """Parses the text into pydantic models"""
+    """Parses text outputs into specified Pydantic models."""

     pydantic_object: Type[BaseModel]
@@ -1,9 +1,15 @@
 import os
-from typing import Any, Dict, cast
+from typing import Any, Dict, List, Optional, cast

 from chromadb import Documents, EmbeddingFunction, Embeddings
 from chromadb.api.types import validate_embedding_function

+from crewai.utilities.exceptions.embedding_exceptions import (
+    EmbeddingConfigurationError,
+    EmbeddingProviderError,
+    EmbeddingInitializationError
+)
+

 class EmbeddingConfigurator:
     def __init__(self):
@@ -21,7 +27,7 @@ class EmbeddingConfigurator:

     def configure_embedder(
         self,
-        embedder_config: Dict[str, Any] | None = None,
+        embedder_config: Optional[Dict[str, Any]] = None,
     ) -> EmbeddingFunction:
         """Configures and returns an embedding function based on the provided config."""
         if embedder_config is None:
@@ -36,42 +42,47 @@ class EmbeddingConfigurator:
                 validate_embedding_function(provider)
                 return provider
             except Exception as e:
-                raise ValueError(f"Invalid custom embedding function: {str(e)}")
+                raise EmbeddingConfigurationError(f"Invalid custom embedding function: {str(e)}")

-        if provider not in self.embedding_functions:
-            raise Exception(
-                f"Unsupported embedding provider: {provider}, supported providers: {list(self.embedding_functions.keys())}"
-            )
+        if not provider or provider not in self.embedding_functions:
+            raise EmbeddingProviderError(str(provider), list(self.embedding_functions.keys()))

-        return self.embedding_functions[provider](config, model_name)
+        try:
+            return self.embedding_functions[str(provider)](config, model_name)
+        except Exception as e:
+            raise EmbeddingInitializationError(str(provider), str(e))

     @staticmethod
-    def _create_default_embedding_function():
-        from chromadb.utils.embedding_functions.openai_embedding_function import (
-            OpenAIEmbeddingFunction,
-        )
-
-        return OpenAIEmbeddingFunction(
-            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
-        )
+    def _create_default_embedding_function() -> EmbeddingFunction:
+        from crewai.utilities.constants import DEFAULT_EMBEDDING_PROVIDER, DEFAULT_EMBEDDING_MODEL
+
+        provider = os.getenv("CREWAI_EMBEDDING_PROVIDER", DEFAULT_EMBEDDING_PROVIDER)
+        model = os.getenv("CREWAI_EMBEDDING_MODEL", DEFAULT_EMBEDDING_MODEL)
+
+        if provider == "openai":
+            api_key = os.getenv("OPENAI_API_KEY")
+            if not api_key:
+                raise EmbeddingConfigurationError("OpenAI API key is required but not provided")
+            from chromadb.utils.embedding_functions.openai_embedding_function import OpenAIEmbeddingFunction
+            return OpenAIEmbeddingFunction(api_key=api_key, model_name=model)
+        elif provider == "ollama":
+            from chromadb.utils.embedding_functions.ollama_embedding_function import OllamaEmbeddingFunction
+            url = os.getenv("CREWAI_OLLAMA_URL", "http://localhost:11434/api/embeddings")
+            return OllamaEmbeddingFunction(url=url, model_name=model)
+        else:
+            raise EmbeddingProviderError(provider, ["openai", "ollama"])

     @staticmethod
-    def _configure_openai(config, model_name):
-        from chromadb.utils.embedding_functions.openai_embedding_function import (
-            OpenAIEmbeddingFunction,
-        )
+    def _configure_openai(config: Dict[str, Any], model_name: str) -> EmbeddingFunction:
+        from chromadb.utils.embedding_functions.openai_embedding_function import OpenAIEmbeddingFunction

         return OpenAIEmbeddingFunction(
             api_key=config.get("api_key") or os.getenv("OPENAI_API_KEY"),
             model_name=model_name,
         )

     @staticmethod
-    def _configure_azure(config, model_name):
-        from chromadb.utils.embedding_functions.openai_embedding_function import (
-            OpenAIEmbeddingFunction,
-        )
+    def _configure_azure(config: Dict[str, Any], model_name: str) -> EmbeddingFunction:
+        from chromadb.utils.embedding_functions.openai_embedding_function import OpenAIEmbeddingFunction

         return OpenAIEmbeddingFunction(
             api_key=config.get("api_key"),
             api_base=config.get("api_base"),
@@ -81,79 +92,62 @@
         )

     @staticmethod
-    def _configure_ollama(config, model_name):
-        from chromadb.utils.embedding_functions.ollama_embedding_function import (
-            OllamaEmbeddingFunction,
-        )
+    def _configure_ollama(config: Dict[str, Any], model_name: str) -> EmbeddingFunction:
+        from chromadb.utils.embedding_functions.ollama_embedding_function import OllamaEmbeddingFunction

         return OllamaEmbeddingFunction(
             url=config.get("url", "http://localhost:11434/api/embeddings"),
             model_name=model_name,
         )

     @staticmethod
-    def _configure_vertexai(config, model_name):
-        from chromadb.utils.embedding_functions.google_embedding_function import (
-            GoogleVertexEmbeddingFunction,
-        )
+    def _configure_vertexai(config: Dict[str, Any], model_name: str) -> EmbeddingFunction:
+        from chromadb.utils.embedding_functions.google_embedding_function import GoogleVertexEmbeddingFunction

         return GoogleVertexEmbeddingFunction(
             model_name=model_name,
             api_key=config.get("api_key"),
         )

     @staticmethod
-    def _configure_google(config, model_name):
-        from chromadb.utils.embedding_functions.google_embedding_function import (
-            GoogleGenerativeAiEmbeddingFunction,
-        )
+    def _configure_google(config: Dict[str, Any], model_name: str) -> EmbeddingFunction:
+        from chromadb.utils.embedding_functions.google_embedding_function import GoogleGenerativeAiEmbeddingFunction

         return GoogleGenerativeAiEmbeddingFunction(
             model_name=model_name,
             api_key=config.get("api_key"),
         )

     @staticmethod
-    def _configure_cohere(config, model_name):
-        from chromadb.utils.embedding_functions.cohere_embedding_function import (
-            CohereEmbeddingFunction,
-        )
+    def _configure_cohere(config: Dict[str, Any], model_name: str) -> EmbeddingFunction:
        from chromadb.utils.embedding_functions.cohere_embedding_function import CohereEmbeddingFunction

         return CohereEmbeddingFunction(
             model_name=model_name,
             api_key=config.get("api_key"),
         )

     @staticmethod
-    def _configure_bedrock(config, model_name):
-        from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
-            AmazonBedrockEmbeddingFunction,
-        )
+    def _configure_bedrock(config: Dict[str, Any], model_name: str) -> EmbeddingFunction:
+        from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import AmazonBedrockEmbeddingFunction

         return AmazonBedrockEmbeddingFunction(
             session=config.get("session"),
         )

     @staticmethod
-    def _configure_huggingface(config, model_name):
-        from chromadb.utils.embedding_functions.huggingface_embedding_function import (
-            HuggingFaceEmbeddingServer,
-        )
+    def _configure_huggingface(config: Dict[str, Any], model_name: str) -> EmbeddingFunction:
+        from chromadb.utils.embedding_functions.huggingface_embedding_function import HuggingFaceEmbeddingServer

         return HuggingFaceEmbeddingServer(
             url=config.get("api_url"),
         )

     @staticmethod
-    def _configure_watson(config, model_name):
+    def _configure_watson(config: Dict[str, Any], model_name: str) -> EmbeddingFunction:
         try:
             import ibm_watsonx_ai.foundation_models as watson_models
             from ibm_watsonx_ai import Credentials
             from ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames as EmbedParams
         except ImportError as e:
-            raise ImportError(
-                "IBM Watson dependencies are not installed. Please install them to use Watson embedding."
-            ) from e
+            raise EmbeddingConfigurationError(
+                "IBM Watson dependencies are not installed. Please install them to use Watson embedding.",
+                provider="watson"
+            )

         class WatsonEmbeddingFunction(EmbeddingFunction):
             def __call__(self, input: Documents) -> Embeddings:
@@ -178,7 +172,6 @@ class EmbeddingConfigurator:
                     embeddings = embedding.embed_documents(input)
                     return cast(Embeddings, embeddings)
                 except Exception as e:
-                    print("Error during Watson embedding:", e)
-                    raise e
+                    raise EmbeddingInitializationError("watson", str(e))

         return WatsonEmbeddingFunction()
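
**Note on the default embedder:** the fallback is no longer hard-coded to OpenAI; it is resolved from environment variables, with the new constants as defaults. A sketch; the module path is assumed from the repo layout, and `nomic-embed-text` is just an example model name:

```python
import os

# Switch the fallback embedder without touching code:
os.environ["CREWAI_EMBEDDING_PROVIDER"] = "ollama"       # default is "openai"
os.environ["CREWAI_EMBEDDING_MODEL"] = "nomic-embed-text"
os.environ["CREWAI_OLLAMA_URL"] = "http://localhost:11434/api/embeddings"

from crewai.utilities.embedding_configurator import EmbeddingConfigurator  # assumed path

embedder = EmbeddingConfigurator().configure_embedder()  # no config -> env-driven default
```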
src/crewai/utilities/errors.py (new file, 39 lines)
@@ -0,0 +1,39 @@
+"""Error message definitions for CrewAI database operations."""
+from typing import Optional
+
+
+class DatabaseOperationError(Exception):
+    """Base exception class for database operation errors."""
+
+    def __init__(self, message: str, original_error: Optional[Exception] = None):
+        """Initialize the database operation error.
+
+        Args:
+            message: The error message to display
+            original_error: The original exception that caused this error, if any
+        """
+        super().__init__(message)
+        self.original_error = original_error
+
+
+class DatabaseError:
+    """Standardized error message templates for database operations."""
+
+    INIT_ERROR: str = "Database initialization error: {}"
+    SAVE_ERROR: str = "Error saving task outputs: {}"
+    UPDATE_ERROR: str = "Error updating task outputs: {}"
+    LOAD_ERROR: str = "Error loading task outputs: {}"
+    DELETE_ERROR: str = "Error deleting task outputs: {}"
+
+    @classmethod
+    def format_error(cls, template: str, error: Exception) -> str:
+        """Format an error message with the given template and error.
+
+        Args:
+            template: The error message template to use
+            error: The exception to format into the template
+
+        Returns:
+            The formatted error message
+        """
+        return template.format(str(error))
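
**Note:** the new `errors.py` module pairs message templates with a carrier exception; intended usage looks roughly like this:

```python
from crewai.utilities.errors import DatabaseError, DatabaseOperationError

try:
    raise RuntimeError("disk full")  # stand-in for a failed storage write
except Exception as e:
    message = DatabaseError.format_error(DatabaseError.SAVE_ERROR, e)
    # message == "Error saving task outputs: disk full"
    raise DatabaseOperationError(message, original_error=e)
```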
@@ -180,12 +180,12 @@ class CrewEvaluator:
             self._test_result_span = self._telemetry.individual_test_result_span(
                 self.crew,
                 evaluation_result.pydantic.quality,
-                current_task._execution_time,
+                current_task.execution_duration,
                 self.openai_model_name,
             )
             self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
             self.run_execution_times[self.iteration].append(
-                current_task._execution_time
+                current_task.execution_duration
             )
         else:
             raise ValueError("Evaluation result is not in the expected format")
@@ -92,13 +92,34 @@ class TaskEvaluator:
         """

         output_training_data = training_data[agent_id]

         final_aggregated_data = ""
-        for _, data in output_training_data.items():
+
+        for iteration, data in output_training_data.items():
+            improved_output = data.get("improved_output")
+            initial_output = data.get("initial_output")
+            human_feedback = data.get("human_feedback")
+
+            if not all([improved_output, initial_output, human_feedback]):
+                missing_fields = [
+                    field
+                    for field in ["improved_output", "initial_output", "human_feedback"]
+                    if not data.get(field)
+                ]
+                error_msg = (
+                    f"Critical training data error: Missing fields ({', '.join(missing_fields)}) "
+                    f"for agent {agent_id} in iteration {iteration}.\n"
+                    "This indicates a broken training process. "
+                    "Cannot proceed with evaluation.\n"
+                    "Please check your training implementation."
+                )
+                raise ValueError(error_msg)
+
             final_aggregated_data += (
-                f"Initial Output:\n{data['initial_output']}\n\n"
-                f"Human Feedback:\n{data['human_feedback']}\n\n"
-                f"Improved Output:\n{data['improved_output']}\n\n"
+                f"Iteration: {iteration}\n"
+                f"Initial Output:\n{initial_output}\n\n"
+                f"Human Feedback:\n{human_feedback}\n\n"
+                f"Improved Output:\n{improved_output}\n\n"
+                "------------------------------------------------\n\n"
             )

         evaluation_query = (
20  src/crewai/utilities/exceptions/embedding_exceptions.py  Normal file
@@ -0,0 +1,20 @@
+from typing import List, Optional
+
+
+class EmbeddingConfigurationError(Exception):
+    def __init__(self, message: str, provider: Optional[str] = None):
+        self.message = message
+        self.provider = provider
+        super().__init__(self.message)
+
+
+class EmbeddingProviderError(EmbeddingConfigurationError):
+    def __init__(self, provider: str, supported_providers: List[str]):
+        message = f"Unsupported embedding provider: {provider}, supported providers: {supported_providers}"
+        super().__init__(message, provider)
+
+
+class EmbeddingInitializationError(EmbeddingConfigurationError):
+    def __init__(self, provider: str, error: str):
+        message = f"Failed to initialize embedding function for provider {provider}: {error}"
+        super().__init__(message, provider)
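A short sketch of how these new exception classes might be raised and handled by embedder-configuration code. The import path matches the new file above; the `SUPPORTED` list and `select_provider` helper are illustrative, not part of the commit:

from crewai.utilities.exceptions.embedding_exceptions import (
    EmbeddingConfigurationError,
    EmbeddingProviderError,
)

SUPPORTED = ["openai", "azure", "ollama", "vertexai"]  # illustrative list


def select_provider(provider: str) -> str:
    # Hypothetical caller: reject providers outside the supported set
    if provider not in SUPPORTED:
        raise EmbeddingProviderError(provider, SUPPORTED)
    return provider


try:
    select_provider("not-a-provider")
except EmbeddingConfigurationError as e:
    # Both subclasses carry .provider, so callers can handle errors structurally
    print(f"{e.provider}: {e.message}")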
@@ -1,30 +1,64 @@
+import json
 import os
 import pickle
 from datetime import datetime
+from typing import Union
 
 
 class FileHandler:
-    """take care of file operations, currently it only logs messages to a file"""
+    """Handler for file operations supporting both JSON and text-based logging.
+
+    Args:
+        file_path (Union[bool, str]): Path to the log file or boolean flag
+    """
 
-    def __init__(self, file_path):
-        if isinstance(file_path, bool):
+    def __init__(self, file_path: Union[bool, str]):
+        self._initialize_path(file_path)
+
+    def _initialize_path(self, file_path: Union[bool, str]):
+        if file_path is True:  # File path is boolean True
             self._path = os.path.join(os.curdir, "logs.txt")
-        elif isinstance(file_path, str):
-            self._path = file_path
+        elif isinstance(file_path, str):  # File path is a string
+            if file_path.endswith((".json", ".txt")):
+                self._path = file_path  # No modification if the file ends with .json or .txt
+            else:
+                self._path = file_path + ".txt"  # Append .txt if the file doesn't end with .json or .txt
+
         else:
-            raise ValueError("file_path must be either a boolean or a string.")
+            raise ValueError("file_path must be a string or boolean.")  # Handle the case where file_path isn't valid
 
     def log(self, **kwargs):
-        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        message = (
-            f"{now}: "
-            + ", ".join([f'{key}="{value}"' for key, value in kwargs.items()])
-            + "\n"
-        )
-        with open(self._path, "a", encoding="utf-8") as file:
-            file.write(message + "\n")
+        try:
+            now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            log_entry = {"timestamp": now, **kwargs}
+
+            if self._path.endswith(".json"):
+                # Append log in JSON format
+                with open(self._path, "a", encoding="utf-8") as file:
+                    # If the file is empty, start with a list; else, append to it
+                    try:
+                        # Try reading existing content to avoid overwriting
+                        with open(self._path, "r", encoding="utf-8") as read_file:
+                            existing_data = json.load(read_file)
+                        existing_data.append(log_entry)
+                    except (json.JSONDecodeError, FileNotFoundError):
+                        # If no valid JSON or file doesn't exist, start with an empty list
+                        existing_data = [log_entry]
+
+                    with open(self._path, "w", encoding="utf-8") as write_file:
+                        json.dump(existing_data, write_file, indent=4)
+                        write_file.write("\n")
+
+            else:
+                # Append log in plain text format
+                message = f"{now}: " + ", ".join([f"{key}=\"{value}\"" for key, value in kwargs.items()]) + "\n"
+                with open(self._path, "a", encoding="utf-8") as file:
+                    file.write(message)
+
+        except Exception as e:
+            raise ValueError(f"Failed to log message: {str(e)}")
 
 
 class PickleHandler:
    def __init__(self, file_name: str) -> None:
        """
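A minimal usage sketch of the reworked handler. The file header for this diff is not shown above, so the import path below is an assumption based on the repo's utilities layout:

# Assumes FileHandler lives in crewai.utilities.file_handler (path not shown in this diff).
from crewai.utilities.file_handler import FileHandler

# A ".json" suffix now triggers structured JSON logging (a list of entries on disk)
json_logger = FileHandler("crew_run.json")
json_logger.log(task="research", status="completed", output="3 sources found")

# Anything else falls back to the original plain-text format,
# and a bare name gets ".txt" appended automatically.
text_logger = FileHandler("crew_run")  # writes to crew_run.txt
text_logger.log(task="research", status="completed")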
@@ -4,8 +4,10 @@ from typing import Dict, Optional, Union
 
 from pydantic import BaseModel, Field, PrivateAttr, model_validator
 
+"""Internationalization support for CrewAI prompts and messages."""
 
 class I18N(BaseModel):
+    """Handles loading and retrieving internationalized prompts."""
     _prompts: Dict[str, Dict[str, str]] = PrivateAttr()
     prompt_file: Optional[str] = Field(
         default=None,
@@ -11,12 +11,10 @@ class InternalInstructor:
         model: Type,
         agent: Optional[Any] = None,
         llm: Optional[str] = None,
-        instructions: Optional[str] = None,
     ):
         self.content = content
         self.agent = agent
         self.llm = llm
-        self.instructions = instructions
         self.model = model
         self._client = None
         self.set_instructor()
@@ -31,10 +29,7 @@ class InternalInstructor:
         import instructor
         from litellm import completion
 
-        self._client = instructor.from_litellm(
-            completion,
-            mode=instructor.Mode.TOOLS,
-        )
+        self._client = instructor.from_litellm(completion)
 
     def to_json(self):
         model = self.to_pydantic()
@@ -42,8 +37,6 @@ class InternalInstructor:
 
     def to_pydantic(self):
         messages = [{"role": "user", "content": self.content}]
-        if self.instructions:
-            messages.append({"role": "system", "content": self.instructions})
         model = self._client.chat.completions.create(
             model=self.llm.model, response_model=self.model, messages=messages
         )
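For reference, a sketch of what the simplified client does at call time: `instructor.from_litellm` wraps litellm's `completion` so responses are parsed straight into a Pydantic model, now using instructor's default mode rather than an explicit `Mode.TOOLS`. The model name and schema here are illustrative, and running it requires a configured API key:

import instructor
from litellm import completion
from pydantic import BaseModel


class TaskEstimate(BaseModel):
    title: str
    hours: float


client = instructor.from_litellm(completion)  # default mode; explicit Mode.TOOLS was dropped
estimate = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative model name
    response_model=TaskEstimate,
    messages=[{"role": "user", "content": "Estimate: write project README"}],
)
print(estimate.title, estimate.hours)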
|||||||
194
src/crewai/utilities/llm_utils.py
Normal file
194
src/crewai/utilities/llm_utils.py
Normal file
@@ -0,0 +1,194 @@
|
|||||||
|
import os
|
||||||
|
from typing import Any, Dict, List, Optional, Union
|
||||||
|
|
||||||
|
from crewai.cli.constants import DEFAULT_LLM_MODEL, ENV_VARS, LITELLM_PARAMS
|
||||||
|
from crewai.llm import LLM
|
||||||
|
|
||||||
|
|
||||||
|
def create_llm(
|
||||||
|
llm_value: Union[str, LLM, Any, None] = None,
|
||||||
|
) -> Optional[LLM]:
|
||||||
|
"""
|
||||||
|
Creates or returns an LLM instance based on the given llm_value.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
llm_value (str | LLM | Any | None):
|
||||||
|
- str: The model name (e.g., "gpt-4").
|
||||||
|
- LLM: Already instantiated LLM, returned as-is.
|
||||||
|
- Any: Attempt to extract known attributes like model_name, temperature, etc.
|
||||||
|
- None: Use environment-based or fallback default model.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
An LLM instance if successful, or None if something fails.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# 1) If llm_value is already an LLM object, return it directly
|
||||||
|
if isinstance(llm_value, LLM):
|
||||||
|
return llm_value
|
||||||
|
|
||||||
|
# 2) If llm_value is a string (model name)
|
||||||
|
if isinstance(llm_value, str):
|
||||||
|
try:
|
||||||
|
created_llm = LLM(model=llm_value)
|
||||||
|
return created_llm
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to instantiate LLM with model='{llm_value}': {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
# 3) If llm_value is None, parse environment variables or use default
|
||||||
|
if llm_value is None:
|
||||||
|
return _llm_via_environment_or_fallback()
|
||||||
|
|
||||||
|
# 4) Otherwise, attempt to extract relevant attributes from an unknown object
|
||||||
|
try:
|
||||||
|
# Extract attributes with explicit types
|
||||||
|
model = (
|
||||||
|
getattr(llm_value, "model_name", None)
|
||||||
|
or getattr(llm_value, "deployment_name", None)
|
||||||
|
or str(llm_value)
|
||||||
|
)
|
||||||
|
temperature: Optional[float] = getattr(llm_value, "temperature", None)
|
||||||
|
max_tokens: Optional[int] = getattr(llm_value, "max_tokens", None)
|
||||||
|
logprobs: Optional[int] = getattr(llm_value, "logprobs", None)
|
||||||
|
timeout: Optional[float] = getattr(llm_value, "timeout", None)
|
||||||
|
api_key: Optional[str] = getattr(llm_value, "api_key", None)
|
||||||
|
base_url: Optional[str] = getattr(llm_value, "base_url", None)
|
||||||
|
api_base: Optional[str] = getattr(llm_value, "api_base", None)
|
||||||
|
|
||||||
|
created_llm = LLM(
|
||||||
|
model=model,
|
||||||
|
temperature=temperature,
|
||||||
|
max_tokens=max_tokens,
|
||||||
|
logprobs=logprobs,
|
||||||
|
timeout=timeout,
|
||||||
|
api_key=api_key,
|
||||||
|
base_url=base_url,
|
||||||
|
api_base=api_base,
|
||||||
|
)
|
||||||
|
return created_llm
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error instantiating LLM from unknown object type: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def _llm_via_environment_or_fallback() -> Optional[LLM]:
|
||||||
|
"""
|
||||||
|
Helper function: if llm_value is None, we load environment variables or fallback default model.
|
||||||
|
"""
|
||||||
|
model_name = (
|
||||||
|
os.environ.get("OPENAI_MODEL_NAME")
|
||||||
|
or os.environ.get("MODEL")
|
||||||
|
or DEFAULT_LLM_MODEL
|
||||||
|
)
|
||||||
|
|
||||||
|
# Initialize parameters with correct types
|
||||||
|
model: str = model_name
|
||||||
|
temperature: Optional[float] = None
|
||||||
|
max_tokens: Optional[int] = None
|
||||||
|
max_completion_tokens: Optional[int] = None
|
||||||
|
logprobs: Optional[int] = None
|
||||||
|
timeout: Optional[float] = None
|
||||||
|
api_key: Optional[str] = None
|
||||||
|
base_url: Optional[str] = None
|
||||||
|
api_version: Optional[str] = None
|
||||||
|
presence_penalty: Optional[float] = None
|
||||||
|
frequency_penalty: Optional[float] = None
|
||||||
|
top_p: Optional[float] = None
|
||||||
|
n: Optional[int] = None
|
||||||
|
stop: Optional[Union[str, List[str]]] = None
|
||||||
|
logit_bias: Optional[Dict[int, float]] = None
|
||||||
|
response_format: Optional[Dict[str, Any]] = None
|
||||||
|
seed: Optional[int] = None
|
||||||
|
top_logprobs: Optional[int] = None
|
||||||
|
callbacks: List[Any] = []
|
||||||
|
|
||||||
|
# Optional base URL from env
|
||||||
|
base_url = (
|
||||||
|
os.environ.get("BASE_URL")
|
||||||
|
or os.environ.get("OPENAI_API_BASE")
|
||||||
|
or os.environ.get("OPENAI_BASE_URL")
|
||||||
|
)
|
||||||
|
|
||||||
|
api_base = os.environ.get("API_BASE") or os.environ.get("AZURE_API_BASE")
|
||||||
|
|
||||||
|
# Synchronize base_url and api_base if one is populated and the other is not
|
||||||
|
if base_url and not api_base:
|
||||||
|
api_base = base_url
|
||||||
|
elif api_base and not base_url:
|
||||||
|
base_url = api_base
|
||||||
|
|
||||||
|
# Initialize llm_params dictionary
|
||||||
|
llm_params: Dict[str, Any] = {
|
||||||
|
"model": model,
|
||||||
|
"temperature": temperature,
|
||||||
|
"max_tokens": max_tokens,
|
||||||
|
"max_completion_tokens": max_completion_tokens,
|
||||||
|
"logprobs": logprobs,
|
||||||
|
"timeout": timeout,
|
||||||
|
"api_key": api_key,
|
||||||
|
"base_url": base_url,
|
||||||
|
"api_base": api_base,
|
||||||
|
"api_version": api_version,
|
||||||
|
"presence_penalty": presence_penalty,
|
||||||
|
"frequency_penalty": frequency_penalty,
|
||||||
|
"top_p": top_p,
|
||||||
|
"n": n,
|
||||||
|
"stop": stop,
|
||||||
|
"logit_bias": logit_bias,
|
||||||
|
"response_format": response_format,
|
||||||
|
"seed": seed,
|
||||||
|
"top_logprobs": top_logprobs,
|
||||||
|
"callbacks": callbacks,
|
||||||
|
}
|
||||||
|
|
||||||
|
UNACCEPTED_ATTRIBUTES = [
|
||||||
|
"AWS_ACCESS_KEY_ID",
|
||||||
|
"AWS_SECRET_ACCESS_KEY",
|
||||||
|
"AWS_REGION_NAME",
|
||||||
|
]
|
||||||
|
set_provider = model_name.split("/")[0] if "/" in model_name else "openai"
|
||||||
|
|
||||||
|
if set_provider in ENV_VARS:
|
||||||
|
env_vars_for_provider = ENV_VARS[set_provider]
|
||||||
|
if isinstance(env_vars_for_provider, (list, tuple)):
|
||||||
|
for env_var in env_vars_for_provider:
|
||||||
|
key_name = env_var.get("key_name")
|
||||||
|
if key_name and key_name not in UNACCEPTED_ATTRIBUTES:
|
||||||
|
env_value = os.environ.get(key_name)
|
||||||
|
if env_value:
|
||||||
|
# Map environment variable names to recognized parameters
|
||||||
|
param_key = _normalize_key_name(key_name.lower())
|
||||||
|
llm_params[param_key] = env_value
|
||||||
|
elif isinstance(env_var, dict):
|
||||||
|
if env_var.get("default", False):
|
||||||
|
for key, value in env_var.items():
|
||||||
|
if key not in ["prompt", "key_name", "default"]:
|
||||||
|
llm_params[key.lower()] = value
|
||||||
|
else:
|
||||||
|
print(
|
||||||
|
f"Expected env_var to be a dictionary, but got {type(env_var)}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Remove None values
|
||||||
|
llm_params = {k: v for k, v in llm_params.items() if v is not None}
|
||||||
|
|
||||||
|
# Try creating the LLM
|
||||||
|
try:
|
||||||
|
new_llm = LLM(**llm_params)
|
||||||
|
return new_llm
|
||||||
|
except Exception as e:
|
||||||
|
print(
|
||||||
|
f"Error instantiating LLM from environment/fallback: {type(e).__name__}: {e}"
|
||||||
|
)
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def _normalize_key_name(key_name: str) -> str:
|
||||||
|
"""
|
||||||
|
Maps environment variable names to recognized litellm parameter keys,
|
||||||
|
using patterns from LITELLM_PARAMS.
|
||||||
|
"""
|
||||||
|
for pattern in LITELLM_PARAMS:
|
||||||
|
if pattern in key_name:
|
||||||
|
return pattern
|
||||||
|
return key_name
|
||||||
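Usage of the new helper covers all four input shapes documented in its docstring. A short sketch (model names illustrative; `AgentConfig` is a hypothetical stand-in for any third-party config object):

from crewai.llm import LLM
from crewai.utilities.llm_utils import create_llm

# 1) Pass-through: an LLM instance is returned unchanged
llm = create_llm(LLM(model="gpt-4o"))

# 2) A bare model name is wrapped in an LLM
llm = create_llm("gpt-4o-mini")

# 3) None falls back to OPENAI_MODEL_NAME / MODEL env vars, then the default model
llm = create_llm(None)


# 4) Any object with model_name/temperature/etc. attributes is adapted
class AgentConfig:
    model_name = "gpt-4o"
    temperature = 0.2


llm = create_llm(AgentConfig())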
@@ -3,17 +3,24 @@ from pathlib import Path
 
 import appdirs
 
+"""Path management utilities for CrewAI storage and configuration."""
 
-def db_storage_path():
+def db_storage_path() -> str:
+    """Returns the path for SQLite database storage.
+
+    Returns:
+        str: Full path to the SQLite database file
+    """
     app_name = get_project_directory_name()
     app_author = "CrewAI"
 
     data_dir = Path(appdirs.user_data_dir(app_name, app_author))
     data_dir.mkdir(parents=True, exist_ok=True)
-    return data_dir
+    return str(data_dir)
 
 
 def get_project_directory_name():
+    """Returns the current project directory name."""
     project_directory_name = os.environ.get("CREWAI_STORAGE_DIR")
 
     if project_directory_name:
@@ -21,4 +28,4 @@ def get_project_directory_name():
     else:
         cwd = Path.cwd()
         project_directory_name = cwd.name
     return project_directory_name
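A quick sketch of the storage-path behavior, including the env-var override. The import path is assumed from the repo's utilities layout; note the function now returns a str rather than a Path:

import os

# Assumed import path; the diff header for this file is not shown above.
from crewai.utilities.paths import db_storage_path

# Default: the current working directory's name is used as the app name
print(db_storage_path())  # e.g. a per-user data dir on your platform, as a str

# CREWAI_STORAGE_DIR overrides the project directory name;
# it is re-read on every call, so setting it takes effect immediately
os.environ["CREWAI_STORAGE_DIR"] = "my_crew"
print(db_storage_path())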
@@ -1,4 +1,3 @@
-import json
 import logging
 from typing import Any, List, Optional
 
@@ -7,10 +6,11 @@ from pydantic import BaseModel, Field
 from crewai.agent import Agent
 from crewai.task import Task
 
+"""Handles planning and coordination of crew tasks."""
 logger = logging.getLogger(__name__)
 
 
 class PlanPerTask(BaseModel):
+    """Represents a plan for a specific task."""
     task: str = Field(..., description="The task for which the plan is created")
     plan: str = Field(
         ...,
@@ -19,6 +19,7 @@ class PlanPerTask(BaseModel):
 
 
 class PlannerTaskPydanticOutput(BaseModel):
+    """Output format for task planning results."""
     list_of_plans_per_task: List[PlanPerTask] = Field(
         ...,
         description="Step by step plan on how the agents can execute their tasks using the available tools with mastery",
@@ -26,6 +27,7 @@ class PlannerTaskPydanticOutput(BaseModel):
 
 
 class CrewPlanner:
+    """Plans and coordinates the execution of crew tasks."""
     def __init__(self, tasks: List[Task], planning_agent_llm: Optional[Any] = None):
         self.tasks = tasks
 
@@ -75,10 +77,10 @@ class CrewPlanner:
     def _get_agent_knowledge(self, task: Task) -> List[str]:
         """
         Safely retrieve knowledge source content from the task's agent.
 
         Args:
             task: The task containing an agent with potential knowledge sources
 
         Returns:
             List[str]: A list of knowledge source strings
         """
@@ -105,6 +107,6 @@ class CrewPlanner:
             f"[{', '.join(str(tool) for tool in task.agent.tools)}]" if task.agent and task.agent.tools else '"agent has no tools"',
             f',\n "agent_knowledge": "[\\"{knowledge_list[0]}\\"]"' if knowledge_list and str(knowledge_list) != "None" else ""
         )
 
         tasks_summary.append(task_summary)
         return " ".join(tasks_summary)
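The planner's output contract is the Pydantic model documented above. A minimal construction sketch, restated standalone with trimmed field descriptions so it runs without the surrounding module:

from typing import List

from pydantic import BaseModel, Field


# Mirrors the models above; descriptions shortened for brevity.
class PlanPerTask(BaseModel):
    task: str = Field(..., description="The task for which the plan is created")
    plan: str = Field(..., description="Step-by-step plan for the task")


class PlannerTaskPydanticOutput(BaseModel):
    list_of_plans_per_task: List[PlanPerTask] = Field(...)


output = PlannerTaskPydanticOutput(
    list_of_plans_per_task=[
        PlanPerTask(task="Research topic", plan="1. Search sources\n2. Summarize findings"),
    ]
)
print(output.list_of_plans_per_task[0].plan)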
@@ -1,7 +1,11 @@
+"""Utility for colored console output."""
+
 from typing import Optional
 
 
 class Printer:
+    """Handles colored console output formatting."""
+
     def print(self, content: str, color: Optional[str] = None):
         if color == "purple":
             self._print_purple(content)
@@ -17,6 +21,16 @@ class Printer:
             self._print_yellow(content)
         elif color == "bold_yellow":
             self._print_bold_yellow(content)
+        elif color == "cyan":
+            self._print_cyan(content)
+        elif color == "bold_cyan":
+            self._print_bold_cyan(content)
+        elif color == "magenta":
+            self._print_magenta(content)
+        elif color == "bold_magenta":
+            self._print_bold_magenta(content)
+        elif color == "green":
+            self._print_green(content)
         else:
             print(content)
 
@@ -40,3 +54,18 @@ class Printer:
 
     def _print_bold_yellow(self, content):
         print("\033[1m\033[93m {}\033[00m".format(content))
+
+    def _print_cyan(self, content):
+        print("\033[96m {}\033[00m".format(content))
+
+    def _print_bold_cyan(self, content):
+        print("\033[1m\033[96m {}\033[00m".format(content))
+
+    def _print_magenta(self, content):
+        print("\033[35m {}\033[00m".format(content))
+
+    def _print_bold_magenta(self, content):
+        print("\033[1m\033[35m {}\033[00m".format(content))
+
+    def _print_green(self, content):
+        print("\033[32m {}\033[00m".format(content))
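The expanded palette in use; a minimal sketch (the import path is assumed from the repo's utilities layout):

from crewai.utilities.printer import Printer

printer = Printer()
printer.print("Task started", color="cyan")
printer.print("Plan ready", color="bold_magenta")
printer.print("Crew run finished", color="green")
printer.print("No color requested")  # unknown/missing colors fall through to plain print()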
Some files were not shown because too many files have changed in this diff.