Merge branch 'main' of github.com:crewAIInc/crewAI into better/event-emitter

This commit is contained in:
Lorenze Jay
2025-02-11 14:33:08 -08:00
47 changed files with 1692 additions and 351 deletions

View File

@@ -1,10 +1,18 @@
 <div align="center">
-![Logo of CrewAI, two people rowing on a boat](./docs/crewai_logo.png)
+![Logo of CrewAI](./docs/crewai_logo.png)
 # **CrewAI**
-🤖 **CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
+**CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
+**CrewAI Enterprise**
+Want to plan, build (+ no code), deploy, monitor and iterate on your agents: [CrewAI Enterprise](https://www.crewai.com/enterprise). Designed for complex, real-world applications, our enterprise solution offers:
+- **Seamless Integrations**
+- **Scalable & Secure Deployment**
+- **Actionable Insights**
+- **24/7 Support**
 <h3>
@@ -190,7 +198,7 @@ research_task:
   description: >
     Conduct a thorough research about {topic}
     Make sure you find any interesting and relevant information given
-    the current year is 2024.
+    the current year is 2025.
   expected_output: >
     A list with 10 bullet points of the most relevant information about {topic}
   agent: researcher
@@ -392,7 +400,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
     goal="Gather and validate supporting market data",
     backstory="You excel at finding and correlating multiple data sources"
 )
 analysis_task = Task(
     description="Analyze {sector} sector data for the past {timeframe}",
     expected_output="Detailed market analysis with confidence score",
@@ -403,7 +411,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
     expected_output="Corroborating evidence and potential contradictions",
     agent=researcher
 )
 # Demonstrate crew autonomy
 analysis_crew = Crew(
     agents=[analyst, researcher],

View File

@@ -23,14 +23,14 @@ A crew in crewAI represents a collaborative group of agents working together to
 | **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
 | **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
 | **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
 | **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
 | **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
 | **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
 | **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
 | **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
 | **Task Callback** _(optional)_ | `task_callback` | A function that is called after the completion of each task. Useful for monitoring or additional operations post-task execution. |
 | **Share Crew** _(optional)_ | `share_crew` | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
-| **Output Log File** _(optional)_ | `output_log_file` | Whether you want to have a file with the complete crew output and execution. You can set it using True and it will default to the folder you are currently in and it will be called logs.txt or passing a string with the full path and name of the file. |
+| **Output Log File** _(optional)_ | `output_log_file` | Set to True to save logs as logs.txt in the current directory, or provide a file path. Logs will be in JSON format if the filename ends in .json, otherwise .txt. Defaults to `None`. |
 | **Manager Agent** _(optional)_ | `manager_agent` | `manager` sets a custom agent that will be used as a manager. |
 | **Prompt File** _(optional)_ | `prompt_file` | Path to the prompt JSON file to be used for the crew. |
 | **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated before each Crew iteration, all Crew data is sent to an AgentPlanner that will plan the tasks and this plan will be added to each task description. |
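Not part of this diff, but as a quick illustration of the `step_callback` and `task_callback` attributes described above, here is a minimal sketch (handler names and payload handling are illustrative):

```python Code
from crewai import Crew

def on_step(step_output):
    # Called after each step of every agent.
    print(f"[step] {step_output}")

def on_task(task_output):
    # Called after each task completes; TaskOutput exposes a .raw string.
    print(f"[task] {task_output.raw}")

crew = Crew(
    agents=[...],
    tasks=[...],
    step_callback=on_step,
    task_callback=on_task,
)
```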
@@ -240,6 +240,23 @@ print(f"Tasks Output: {crew_output.tasks_output}")
 print(f"Token Usage: {crew_output.token_usage}")
 ```
+## Accessing Crew Logs
+You can see real-time logs of the crew execution by setting `output_log_file` to `True` (boolean) or to a `file_name` (string). Events can be logged to both `file_name.txt` and `file_name.json`.
+If set to `True`, logs are saved as `logs.txt`.
+If `output_log_file` is set to `False` (boolean) or `None`, no logs are saved.
+```python Code
+# Save crew logs
+crew = Crew(output_log_file=True)  # Logs will be saved as logs.txt
+crew = Crew(output_log_file="file_name")  # Logs will be saved as file_name.txt
+crew = Crew(output_log_file="file_name.txt")  # Logs will be saved as file_name.txt
+crew = Crew(output_log_file="file_name.json")  # Logs will be saved as file_name.json
+```
 ## Memory Utilization
 Crews can utilize memory (short-term, long-term, and entity memory) to enhance their execution and learning over time. This feature allows crews to store and recall execution memories, aiding in decision-making and task execution strategies.

View File

@@ -232,18 +232,18 @@ class UnstructuredExampleFlow(Flow):
     def first_method(self):
         # The state automatically includes an 'id' field
         print(f"State ID: {self.state['id']}")
-        self.state.message = "Hello from structured flow"
-        self.state.counter = 0
+        self.state['counter'] = 0
+        self.state['message'] = "Hello from structured flow"
     @listen(first_method)
     def second_method(self):
-        self.state.counter += 1
-        self.state.message += " - updated"
+        self.state['counter'] += 1
+        self.state['message'] += " - updated"
     @listen(second_method)
     def third_method(self):
-        self.state.counter += 1
-        self.state.message += " - updated again"
+        self.state['counter'] += 1
+        self.state['message'] += " - updated again"
         print(f"State after third_method: {self.state}")

View File

@@ -91,7 +91,7 @@ result = crew.kickoff(inputs={"question": "What city does John live in and how o
 ```
-Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including TXT, PDF, DOCX, HTML, and more.
+Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including MD, PDF, DOCX, HTML, and more.
 <Note>
 You need to install `docling` for the following example to work: `uv add docling`
@@ -152,10 +152,10 @@ Here are examples of how to use different types of knowledge sources:
 ### Text File Knowledge Source
 ```python
-from crewai.knowledge.source.crew_docling_source import CrewDoclingSource
+from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource
 # Create a text file knowledge source
-text_source = CrewDoclingSource(
+text_source = TextFileKnowledgeSource(
     file_paths=["document.txt", "another.txt"]
 )
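To wire such a source into a crew, a hedged sketch (assuming the `knowledge_sources` parameter documented elsewhere in these pages):

```python Code
from crewai import Agent
from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource

text_source = TextFileKnowledgeSource(file_paths=["document.txt"])

agent = Agent(
    role="Document Analyst",
    goal="Answer questions from the provided documents",
    backstory="Expert at extracting facts from text files",
    knowledge_sources=[text_source],  # attach the source to this agent
)
```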

View File

@@ -463,26 +463,32 @@ Learn how to get the most out of your LLM configuration:
 <Accordion title="Google">
 ```python Code
-# Option 1. Gemini accessed with an API key.
+# Option 1: Gemini accessed with an API key.
 # https://ai.google.dev/gemini-api/docs/api-key
 GEMINI_API_KEY=<your-api-key>
-# Option 2. Vertex AI IAM credentials for Gemini, Anthropic, and anything in the Model Garden.
+# Option 2: Vertex AI IAM credentials for Gemini, Anthropic, and Model Garden.
 # https://cloud.google.com/vertex-ai/generative-ai/docs/overview
 ```
-## GET CREDENTIALS
+Get credentials:
+```python Code
+import json
 file_path = 'path/to/vertex_ai_service_account.json'
 # Load the JSON file
 with open(file_path, 'r') as file:
     vertex_credentials = json.load(file)
-# Convert to JSON string
+# Convert the credentials to a JSON string
 vertex_credentials_json = json.dumps(vertex_credentials)
+```
 Example usage:
 ```python Code
+from crewai import LLM
 llm = LLM(
     model="gemini/gemini-1.5-pro-latest",
     temperature=0.7,
@@ -720,6 +726,30 @@ Learn how to get the most out of your LLM configuration:
 </Accordion>
 </AccordionGroup>
+## Structured LLM Calls
+CrewAI supports structured responses from LLM calls by allowing you to define a `response_format` using a Pydantic model. This enables the framework to automatically parse and validate the output, making it easier to integrate the response into your application without manual post-processing.
+For example, you can define a Pydantic model to represent the expected response structure and pass it as the `response_format` when instantiating the LLM. The model will then be used to convert the LLM output into a structured Python object.
+```python Code
+from pydantic import BaseModel
+from crewai import LLM
+class Dog(BaseModel):
+    name: str
+    age: int
+    breed: str
+llm = LLM(model="gpt-4o", response_format=Dog)
+response = llm.call(
+    "Analyze the following messages and return the name, age, and breed. "
+    "Meet Kona! She is 3 years old and is a black german shepherd."
+)
+print(response)
+```
 ## Common Issues and Solutions
 <Tabs>

View File

@@ -58,41 +58,107 @@ my_crew = Crew(
 ### Example: Use Custom Memory Instances e.g FAISS as the VectorDB
 ```python Code
-from crewai import Crew, Agent, Task, Process
+from crewai import Crew, Process
+from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
+from crewai.memory.storage import LTMSQLiteStorage, RAGStorage
+from typing import List, Optional
 # Assemble your crew with memory capabilities
-my_crew = Crew(
-    agents=[...],
-    tasks=[...],
-    process="Process.sequential",
-    memory=True,
-    long_term_memory=EnhanceLongTermMemory(
+my_crew: Crew = Crew(
+    agents = [...],
+    tasks = [...],
+    process = Process.sequential,
+    memory = True,
+    # Long-term memory for persistent storage across sessions
+    long_term_memory = LongTermMemory(
         storage=LTMSQLiteStorage(
-            db_path="/my_data_dir/my_crew1/long_term_memory_storage.db"
+            db_path="/my_crew1/long_term_memory_storage.db"
         )
     ),
-    short_term_memory=EnhanceShortTermMemory(
-        storage=CustomRAGStorage(
-            crew_name="my_crew",
-            storage_type="short_term",
-            data_dir="//my_data_dir",
-            model=embedder["model"],
-            dimension=embedder["dimension"],
-        ),
-    ),
+    # Short-term memory for current context using RAG
+    short_term_memory = ShortTermMemory(
+        storage = RAGStorage(
+            embedder_config={
+                "provider": "openai",
+                "config": {
+                    "model": 'text-embedding-3-small'
+                }
+            },
+            type="short_term",
+            path="/my_crew1/"
+        )
+    ),
-    entity_memory=EnhanceEntityMemory(
-        storage=CustomRAGStorage(
-            crew_name="my_crew",
-            storage_type="entities",
-            data_dir="//my_data_dir",
-            model=embedder["model"],
-            dimension=embedder["dimension"],
-        ),
-    ),
+    # Entity memory for tracking key information about entities
+    entity_memory = EntityMemory(
+        storage=RAGStorage(
+            embedder_config={
+                "provider": "openai",
+                "config": {
+                    "model": 'text-embedding-3-small'
+                }
+            },
+            type="short_term",
+            path="/my_crew1/"
+        )
+    ),
     verbose=True,
 )
 ```
## Security Considerations
When configuring memory storage:
- Use environment variables for storage paths (e.g., `CREWAI_STORAGE_DIR`)
- Never hardcode sensitive information like database credentials
- Consider access permissions for storage directories
- Use relative paths when possible to maintain portability
Example using environment variables:
```python
import os
from crewai import Crew
from crewai.memory import LongTermMemory
from crewai.memory.storage import LTMSQLiteStorage
# Configure storage path using environment variable
storage_path = os.getenv("CREWAI_STORAGE_DIR", "./storage")
crew = Crew(
memory=True,
long_term_memory=LongTermMemory(
storage=LTMSQLiteStorage(
db_path="{storage_path}/memory.db".format(storage_path=storage_path)
)
)
)
```
## Configuration Examples
### Basic Memory Configuration
```python
from crewai import Crew
from crewai.memory import LongTermMemory
# Simple memory configuration
crew = Crew(memory=True) # Uses default storage locations
```
### Custom Storage Configuration
```python
from crewai import Crew
from crewai.memory import LongTermMemory
from crewai.memory.storage import LTMSQLiteStorage
# Configure custom storage paths
crew = Crew(
memory=True,
long_term_memory=LongTermMemory(
storage=LTMSQLiteStorage(db_path="./memory.db")
)
)
```
## Integrating Mem0 for Enhanced User Memory
[Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.
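For orientation (not part of this diff), a minimal sketch of the Mem0 hookup these docs describe; the `memory_config` shape is assumed from crewAI's user-memory documentation and the user id is hypothetical:

```python Code
import os
from crewai import Crew, Process

os.environ["MEM0_API_KEY"] = "m0-..."  # placeholder API key

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    # Route user memory through Mem0 for the given (hypothetical) user id.
    memory_config={"provider": "mem0", "config": {"user_id": "john"}},
)
```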
@@ -185,7 +251,12 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder=OpenAIEmbeddingFunction(api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"),
+    embedder={
+        "provider": "openai",
+        "config": {
+            "model": 'text-embedding-3-small'
+        }
+    }
 )
 ```
@@ -211,6 +282,19 @@ my_crew = Crew(
### Using Google AI embeddings
#### Prerequisites
Before using Google AI embeddings, ensure you have:
- Access to the Gemini API
- The necessary API keys and permissions
You will need to update your *pyproject.toml* dependencies:
```YAML
dependencies = [
"google-generativeai>=0.8.4", #main version in January/2025 - crewai v.0.100.0 and crewai-tools 0.33.0
"crewai[tools]>=0.100.0,<1.0.0"
]
```
 ```python Code
 from crewai import Crew, Agent, Task, Process
@@ -224,7 +308,7 @@ my_crew = Crew(
         "provider": "google",
         "config": {
             "api_key": "<YOUR_API_KEY>",
-            "model_name": "<model_name>"
+            "model": "<model_name>"
         }
     }
 )
@@ -242,13 +326,15 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder=OpenAIEmbeddingFunction(
-        api_key="YOUR_API_KEY",
-        api_base="YOUR_API_BASE_PATH",
-        api_type="azure",
-        api_version="YOUR_API_VERSION",
-        model_name="text-embedding-3-small"
-    )
+    embedder={
+        "provider": "openai",
+        "config": {
+            "api_key": "YOUR_API_KEY",
+            "api_base": "YOUR_API_BASE_PATH",
+            "api_version": "YOUR_API_VERSION",
+            "model_name": 'text-embedding-3-small'
+        }
+    }
 )
 ```
@@ -264,12 +350,15 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder=GoogleVertexEmbeddingFunction(
-        project_id="YOUR_PROJECT_ID",
-        region="YOUR_REGION",
-        api_key="YOUR_API_KEY",
-        model_name="textembedding-gecko"
-    )
+    embedder={
+        "provider": "vertexai",
+        "config": {
+            "project_id": "YOUR_PROJECT_ID",
+            "region": "YOUR_REGION",
+            "api_key": "YOUR_API_KEY",
+            "model_name": "textembedding-gecko"
+        }
+    }
 )
 ```
@@ -288,7 +377,7 @@ my_crew = Crew(
     "provider": "cohere",
     "config": {
         "api_key": "YOUR_API_KEY",
-        "model_name": "<model_name>"
+        "model": "<model_name>"
     }
 }
 )
@@ -308,7 +397,7 @@ my_crew = Crew(
     "provider": "voyageai",
     "config": {
         "api_key": "YOUR_API_KEY",
-        "model_name": "<model_name>"
+        "model": "<model_name>"
     }
 }
 )
@@ -358,6 +447,65 @@ my_crew = Crew(
 )
 ```
### Using Amazon Bedrock embeddings
```python Code
# Note: Ensure you have installed `boto3` for Bedrock embeddings to work.
import os
import boto3
from crewai import Crew, Agent, Task, Process
boto3_session = boto3.Session(
region_name=os.environ.get("AWS_REGION_NAME"),
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY")
)
my_crew = Crew(
agents=[...],
tasks=[...],
process=Process.sequential,
memory=True,
embedder={
"provider": "bedrock",
"config":{
"session": boto3_session,
"model": "amazon.titan-embed-text-v2:0",
"vector_dimension": 1024
}
},
verbose=True
)
```
### Adding Custom Embedding Function
```python Code
from crewai import Crew, Agent, Task, Process
from chromadb import Documents, EmbeddingFunction, Embeddings
# Create a custom embedding function
class CustomEmbedder(EmbeddingFunction):
def __call__(self, input: Documents) -> Embeddings:
# generate embeddings
return [[1.0, 2.0, 3.0] for _ in input]  # dummy embeddings: one vector per input document
my_crew = Crew(
agents=[...],
tasks=[...],
process=Process.sequential,
memory=True,
verbose=True,
embedder={
"provider": "custom",
"config": {
"embedder": CustomEmbedder()
}
}
)
```
### Resetting Memory
```shell

View File

@@ -81,8 +81,8 @@ my_crew.kickoff()
 3. **Collect Data:**
-   - Search for the latest papers, articles, and reports published in 2023 and early 2024.
-   - Use keywords like "Large Language Models 2024", "AI LLM advancements", "AI ethics 2024", etc.
+   - Search for the latest papers, articles, and reports published in 2024 and early 2025.
+   - Use keywords like "Large Language Models 2025", "AI LLM advancements", "AI ethics 2025", etc.
 4. **Analyze Findings:**

View File

@@ -69,7 +69,7 @@ research_task:
   description: >
     Conduct a thorough research about {topic}
     Make sure you find any interesting and relevant information given
-    the current year is 2024.
+    the current year is 2025.
  expected_output: >
    A list with 10 bullet points of the most relevant information about {topic}
  agent: researcher
@@ -155,7 +155,7 @@ research_task = Task(
     description="""
         Conduct a thorough research about AI Agents.
         Make sure you find any interesting and relevant information given
-        the current year is 2024.
+        the current year is 2025.
     """,
     expected_output="""
         A list with 10 bullet points of the most relevant information about AI Agents
@@ -268,7 +268,7 @@ analysis_task = Task(
 Task guardrails provide a way to validate and transform task outputs before they
 are passed to the next task. This feature helps ensure data quality and provides
-efeedback to agents when their output doesn't meet specific criteria.
+feedback to agents when their output doesn't meet specific criteria.
 ### Using Task Guardrails

View File

@@ -60,12 +60,12 @@ writer = Agent(
 # Create tasks for your agents
 task1 = Task(
     description=(
-        "Conduct a comprehensive analysis of the latest advancements in AI in 2024. "
+        "Conduct a comprehensive analysis of the latest advancements in AI in 2025. "
         "Identify key trends, breakthrough technologies, and potential industry impacts. "
         "Compile your findings in a detailed report. "
         "Make sure to check with a human if the draft is good before finalizing your answer."
     ),
-    expected_output='A comprehensive full report on the latest AI advancements in 2024, leave nothing out',
+    expected_output='A comprehensive full report on the latest AI advancements in 2025, leave nothing out',
     agent=researcher,
     human_input=True
 )
@@ -76,7 +76,7 @@ task2 = Task(
         "Your post should be informative yet accessible, catering to a tech-savvy audience. "
         "Aim for a narrative that captures the essence of these breakthroughs and their implications for the future."
     ),
-    expected_output='A compelling 3 paragraphs blog post formatted as markdown about the latest AI advancements in 2024',
+    expected_output='A compelling 3 paragraphs blog post formatted as markdown about the latest AI advancements in 2025',
     agent=writer,
     human_input=True
 )

View File

@@ -0,0 +1,206 @@
---
title: Agent Monitoring with MLflow
description: Quickly start monitoring your Agents with MLflow.
icon: bars-staggered
---
# MLflow Overview
[MLflow](https://mlflow.org/) is an open-source platform to assist machine learning practitioners and teams in handling the complexities of the machine learning process.
It provides a tracing feature that enhances LLM observability in your Generative AI applications by capturing detailed information about the execution of your application's services.
Tracing provides a way to record the inputs, outputs, and metadata associated with each intermediate step of a request, enabling you to easily pinpoint the source of bugs and unexpected behaviors.
![Overview of MLflow crewAI tracing usage](/images/mlflow-tracing.gif)
### Features
- **Tracing Dashboard**: Monitor activities of your crewAI agents with detailed dashboards that include inputs, outputs and metadata of spans.
- **Automated Tracing**: A fully automated integration with crewAI, which can be enabled by running `mlflow.crewai.autolog()`.
- **Manual Trace Instrumentation with minor effort**: Customize trace instrumentation through MLflow's high-level fluent APIs such as decorators, function wrappers and context managers.
- **OpenTelemetry Compatibility**: MLflow Tracing supports exporting traces to an OpenTelemetry Collector, which can then be used to export traces to various backends such as Jaeger, Zipkin, and AWS X-Ray.
- **Package and Deploy Agents**: Package and deploy your crewAI agents to an inference server with a variety of deployment targets.
- **Securely Host LLMs**: Host multiple LLMs from various providers in one unified endpoint through the MLflow gateway.
- **Evaluation**: Evaluate your crewAI agents with a wide range of metrics using a convenient API `mlflow.evaluate()`.
## Setup Instructions
<Steps>
<Step title="Install MLflow package">
```shell
# The crewAI integration is available in mlflow>=2.19.0
pip install mlflow
```
</Step>
<Step title="Start MFflow tracking server">
```shell
# This process is optional, but it is recommended to use MLflow tracking server for better visualization and broader features.
mlflow server
```
</Step>
<Step title="Initialize MLflow in Your Application">
Add the following two lines to your application code:
```python
import mlflow
mlflow.crewai.autolog()
# Optional: Set a tracking URI and an experiment name if you have a tracking server
mlflow.set_tracking_uri("http://localhost:5000")
mlflow.set_experiment("CrewAI")
```
Example Usage for tracing CrewAI Agents:
```python
from crewai import Agent, Crew, Task
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai_tools import SerperDevTool, WebsiteSearchTool
from textwrap import dedent
content = "Users name is John. He is 30 years old and lives in San Francisco."
string_source = StringKnowledgeSource(
content=content, metadata={"preference": "personal"}
)
search_tool = WebsiteSearchTool()
class TripAgents:
def city_selection_agent(self):
return Agent(
role="City Selection Expert",
goal="Select the best city based on weather, season, and prices",
backstory="An expert in analyzing travel data to pick ideal destinations",
tools=[
search_tool,
],
verbose=True,
)
def local_expert(self):
return Agent(
role="Local Expert at this city",
goal="Provide the BEST insights about the selected city",
backstory="""A knowledgeable local guide with extensive information
about the city, its attractions and customs"""
tools=[search_tool],
verbose=True,
)
class TripTasks:
def identify_task(self, agent, origin, cities, interests, range):
return Task(
description=dedent(
f"""
Analyze and select the best city for the trip based
on specific criteria such as weather patterns, seasonal
events, and travel costs. This task involves comparing
multiple cities, considering factors like current weather
conditions, upcoming cultural or seasonal events, and
overall travel expenses.
Your final answer must be a detailed
report on the chosen city, and everything you found out
about it, including the actual flight costs, weather
forecast and attractions.
Traveling from: {origin}
City Options: {cities}
Trip Date: {range}
Traveler Interests: {interests}
"""
),
agent=agent,
expected_output="Detailed report on the chosen city including flight costs, weather forecast, and attractions",
)
def gather_task(self, agent, origin, interests, range):
return Task(
description=dedent(
f"""
As a local expert on this city you must compile an
in-depth guide for someone traveling there and wanting
to have THE BEST trip ever!
Gather information about key attractions, local customs,
special events, and daily activity recommendations.
Find the best spots to go to, the kind of place only a
local would know.
This guide should provide a thorough overview of what
the city has to offer, including hidden gems, cultural
hotspots, must-visit landmarks, weather forecasts, and
high level costs.
The final answer must be a comprehensive city guide,
rich in cultural insights and practical tips,
tailored to enhance the travel experience.
Trip Date: {range}
Traveling from: {origin}
Traveler Interests: {interests}
"""
),
agent=agent,
expected_output="Comprehensive city guide including hidden gems, cultural hotspots, and practical travel tips",
)
class TripCrew:
def __init__(self, origin, cities, date_range, interests):
self.cities = cities
self.origin = origin
self.interests = interests
self.date_range = date_range
def run(self):
agents = TripAgents()
tasks = TripTasks()
city_selector_agent = agents.city_selection_agent()
local_expert_agent = agents.local_expert()
identify_task = tasks.identify_task(
city_selector_agent,
self.origin,
self.cities,
self.interests,
self.date_range,
)
gather_task = tasks.gather_task(
local_expert_agent, self.origin, self.interests, self.date_range
)
crew = Crew(
agents=[city_selector_agent, local_expert_agent],
tasks=[identify_task, gather_task],
verbose=True,
memory=True,
knowledge={
"sources": [string_source],
"metadata": {"preference": "personal"},
},
)
result = crew.kickoff()
return result
trip_crew = TripCrew("California", "Tokyo", "Dec 12 - Dec 20", "sports")
result = trip_crew.run()
print(result)
```
Refer to [MLflow Tracing Documentation](https://mlflow.org/docs/latest/llms/tracing/index.html) for more configurations and use cases.
</Step>
<Step title="Visualize Activities of Agents">
Now traces for your crewAI agents are captured by MLflow.
Visit the MLflow tracking server to view the traces and get insights into your agents.
Open `127.0.0.1:5000` in your browser to access the MLflow tracking server.
<Frame caption="MLflow Tracing Dashboard">
<img src="/images/mlflow1.png" alt="MLflow tracing example with crewai" />
</Frame>
</Step>
</Steps>

View File

@@ -45,6 +45,7 @@ image_analyst = Agent(
 # Create a task for image analysis
 task = Task(
     description="Analyze the product image at https://example.com/product.jpg and provide a detailed description",
+    expected_output="A detailed description of the product image",
     agent=image_analyst
 )
@@ -81,6 +82,7 @@ inspection_task = Task(
     3. Compliance with standards
     Provide a detailed report highlighting any issues found.
     """,
+    expected_output="A detailed report highlighting any issues found",
     agent=expert_analyst
 )

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 MiB

BIN
docs/images/mlflow1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 382 KiB

View File

@@ -101,6 +101,7 @@
 "how-to/conditional-tasks",
 "how-to/agentops-observability",
 "how-to/langtrace-observability",
+"how-to/mlflow-observability",
 "how-to/openlit-observability",
 "how-to/portkey-observability"
 ]

View File

@@ -58,7 +58,7 @@ Follow the steps below to get crewing! 🚣‍♂️
   description: >
     Conduct a thorough research about {topic}
     Make sure you find any interesting and relevant information given
-    the current year is 2024.
+    the current year is 2025.
   expected_output: >
     A list with 10 bullet points of the most relevant information about {topic}
   agent: researcher
@@ -195,10 +195,10 @@ Follow the steps below to get crewing! 🚣‍♂️
 <CodeGroup>
 ```markdown output/report.md
-# Comprehensive Report on the Rise and Impact of AI Agents in 2024
+# Comprehensive Report on the Rise and Impact of AI Agents in 2025
 ## 1. Introduction to AI Agents
-In 2024, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce.
+In 2025, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce.
 ## 2. Benefits of AI Agents
 AI agents bring numerous advantages that are transforming traditional work environments. Key benefits include:
@@ -252,7 +252,7 @@ Follow the steps below to get crewing! 🚣‍♂️
 To stay competitive and harness the full potential of AI agents, organizations must remain vigilant about latest developments in AI technology and consider continuous learning and adaptation in their strategic planning.
 ## 8. Conclusion
-The emergence of AI agents is undeniably reshaping the workplace landscape in 2024. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment.
+The emergence of AI agents is undeniably reshaping the workplace landscape in 2025. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment.
 ```
 </CodeGroup>
 </Step>

View File

@@ -8,9 +8,9 @@ icon: file-pen
 ## Description
-The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files.
+The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files with cross-platform compatibility (Windows, Linux, macOS).
 It is particularly useful in scenarios such as generating reports, saving logs, creating configuration files, and more.
-This tool supports creating new directories if they don't exist, making it easier to organize your output.
+This tool handles path differences across operating systems, supports UTF-8 encoding, and automatically creates directories if they don't exist, making it easier to organize your output reliably across different platforms.
 ## Installation
@@ -43,6 +43,8 @@ print(result)
 ## Conclusion
-By integrating the `FileWriterTool` into your crews, the agents can execute the process of writing content to files and creating directories.
+By integrating the `FileWriterTool` into your crews, the agents can reliably write content to files across different operating systems.
-This tool is essential for tasks that require saving output data, creating structured file systems, and more. By adhering to the setup and usage guidelines provided,
-incorporating this tool into projects is straightforward and efficient.
+This tool is essential for tasks that require saving output data, creating structured file systems, and handling cross-platform file operations.
+It's particularly recommended for Windows users who may encounter file writing issues with standard Python file operations.
+By adhering to the setup and usage guidelines provided, incorporating this tool into projects is straightforward and ensures consistent file writing behavior across all platforms.
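For reference, a hedged usage sketch of the tool described above; the argument names follow crewai_tools' documented usage and should be treated as assumptions:

```python Code
from crewai_tools import FileWriterTool

writer = FileWriterTool()
# Writes the content, creating the target directory if it does not exist.
result = writer.run(filename="report.txt", directory="output", content="Quarterly findings...")
print(result)
```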

View File

@@ -152,6 +152,7 @@ nav:
 - Agent Monitoring with AgentOps: 'how-to/AgentOps-Observability.md'
 - Agent Monitoring with LangTrace: 'how-to/Langtrace-Observability.md'
 - Agent Monitoring with OpenLIT: 'how-to/openlit-Observability.md'
+- Agent Monitoring with MLflow: 'how-to/mlflow-Observability.md'
 - Tools Docs:
 - Browserbase Web Loader: 'tools/BrowserbaseLoadTool.md'
 - Code Docs RAG Search: 'tools/CodeDocsSearchTool.md'

View File

@@ -1,6 +1,6 @@
 [project]
 name = "crewai"
-version = "0.100.0"
+version = "0.100.1"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 readme = "README.md"
 requires-python = ">=3.10,<3.13"

View File

@@ -14,7 +14,7 @@ warnings.filterwarnings(
     category=UserWarning,
     module="pydantic.main",
 )
-__version__ = "0.100.0"
+__version__ = "0.100.1"
 __all__ = [
     "Agent",
     "Crew",

View File

@@ -1,6 +1,7 @@
+import re
 import shutil
 import subprocess
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Any, Dict, List, Literal, Optional, Sequence, Union
 from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -15,7 +16,6 @@ from crewai.memory.contextual.contextual_memory import ContextualMemory
 from crewai.task import Task
 from crewai.tools import BaseTool
 from crewai.tools.agent_tools.agent_tools import AgentTools
-from crewai.tools.base_tool import Tool
 from crewai.utilities import Converter, Prompts
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
 from crewai.utilities.converter import generate_model_description
@@ -59,7 +59,6 @@ class Agent(BaseAgent):
     llm: The language model that will run the agent.
     function_calling_llm: The language model that will handle the tool calling for this agent, it overrides the crew function_calling_llm.
     max_iter: Maximum number of iterations for an agent to execute a task.
-    memory: Whether the agent should have memory or not.
     max_rpm: Maximum number of requests per minute for the agent execution to be respected.
     verbose: Whether the agent execution should be in verbose mode.
     allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
@@ -76,9 +75,6 @@ class Agent(BaseAgent):
 )
 agent_ops_agent_name: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
 agent_ops_agent_id: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
-cache_handler: InstanceOf[CacheHandler] = Field(
-    default=None, description="An instance of the CacheHandler class."
-)
 step_callback: Optional[Any] = Field(
     default=None,
     description="Callback to be executed after each step of the agent execution.",
@@ -112,10 +108,6 @@ class Agent(BaseAgent):
     default=True,
     description="Keep messages under the context window size by summarizing content.",
 )
-max_iter: int = Field(
-    default=20,
-    description="Maximum number of iterations for an agent to execute a task before giving it's best answer",
-)
 max_retry_limit: int = Field(
     default=2,
     description="Maximum number of retries for an agent to execute a task when an error occurs.",
@@ -158,7 +150,8 @@ class Agent(BaseAgent):
 def _set_knowledge(self):
     try:
         if self.knowledge_sources:
-            knowledge_agent_name = f"{self.role.replace(' ', '_')}"
+            full_pattern = re.compile(r"[^a-zA-Z0-9\-_\r\n]|(\.\.)")
+            knowledge_agent_name = f"{re.sub(full_pattern, '_', self.role)}"
             if isinstance(self.knowledge_sources, list) and all(
                 isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
             ):
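To see what the new sanitization line does, a quick standalone illustration (the example role string is hypothetical):

```python Code
import re

full_pattern = re.compile(r"[^a-zA-Z0-9\-_\r\n]|(\.\.)")
# Any character outside [a-zA-Z0-9-_] (and CR/LF), or a "..", becomes "_".
print(re.sub(full_pattern, "_", "Senior Researcher (AI/ML)"))  # Senior_Researcher__AI_ML_
```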
@@ -200,13 +193,15 @@ class Agent(BaseAgent):
         if task.output_json:
             # schema = json.dumps(task.output_json, indent=2)
             schema = generate_model_description(task.output_json)
+            task_prompt += "\n" + self.i18n.slice(
+                "formatted_task_instructions"
+            ).format(output_format=schema)
         elif task.output_pydantic:
             schema = generate_model_description(task.output_pydantic)
+            task_prompt += "\n" + self.i18n.slice(
+                "formatted_task_instructions"
+            ).format(output_format=schema)
-        task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
-            output_format=schema
-        )
         if context:
             task_prompt = self.i18n.slice("task_with_context").format(
@@ -344,14 +339,14 @@ class Agent(BaseAgent):
         tools = agent_tools.tools()
         return tools
-    def get_multimodal_tools(self) -> List[Tool]:
+    def get_multimodal_tools(self) -> Sequence[BaseTool]:
         from crewai.tools.agent_tools.add_image_tool import AddImageTool
         return [AddImageTool()]
     def get_code_execution_tools(self):
         try:
-            from crewai_tools import CodeInterpreterTool
+            from crewai_tools import CodeInterpreterTool  # type: ignore
             # Set the unsafe_mode based on the code_execution_mode attribute
             unsafe_mode = self.code_execution_mode == "unsafe"

View File

@@ -24,6 +24,7 @@ from crewai.tools import BaseTool
 from crewai.tools.base_tool import Tool
 from crewai.utilities import I18N, Logger, RPMController
 from crewai.utilities.config import process_config
+from crewai.utilities.converter import Converter
 T = TypeVar("T", bound="BaseAgent")
@@ -42,7 +43,7 @@ class BaseAgent(ABC, BaseModel):
     max_rpm (Optional[int]): Maximum number of requests per minute for the agent execution.
     allow_delegation (bool): Allow delegation of tasks to agents.
     tools (Optional[List[Any]]): Tools at the agent's disposal.
-    max_iter (Optional[int]): Maximum iterations for an agent to execute a task.
+    max_iter (int): Maximum iterations for an agent to execute a task.
     agent_executor (InstanceOf): An instance of the CrewAgentExecutor class.
     llm (Any): Language model that will run the agent.
     crew (Any): Crew to which the agent belongs.
@@ -114,7 +115,7 @@ class BaseAgent(ABC, BaseModel):
 tools: Optional[List[Any]] = Field(
     default_factory=list, description="Tools at agents' disposal"
 )
-max_iter: Optional[int] = Field(
+max_iter: int = Field(
     default=25, description="Maximum iterations for an agent to execute a task"
 )
 agent_executor: InstanceOf = Field(
@@ -125,11 +126,12 @@ class BaseAgent(ABC, BaseModel):
 )
 crew: Any = Field(default=None, description="Crew to which the agent belongs.")
 i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
-cache_handler: InstanceOf[CacheHandler] = Field(
+cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
     default=None, description="An instance of the CacheHandler class."
 )
 tools_handler: InstanceOf[ToolsHandler] = Field(
-    default=None, description="An instance of the ToolsHandler class."
+    default_factory=ToolsHandler,
+    description="An instance of the ToolsHandler class.",
 )
 max_tokens: Optional[int] = Field(
     default=None, description="Maximum number of tokens for the agent's execution."
@@ -254,7 +256,7 @@ class BaseAgent(ABC, BaseModel):
 @abstractmethod
 def get_output_converter(
     self, llm: Any, text: str, model: type[BaseModel] | None, instructions: str
-):
+) -> Converter:
     """Get the converter class for the agent to create json/pydantic outputs."""
     pass

View File

@@ -2,11 +2,7 @@ import subprocess
 import click
-from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
-from crewai.memory.entity.entity_memory import EntityMemory
-from crewai.memory.long_term.long_term_memory import LongTermMemory
-from crewai.memory.short_term.short_term_memory import ShortTermMemory
-from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
+from crewai.cli.utils import get_crew
 def reset_memories_command(
@@ -30,30 +26,35 @@ def reset_memories_command(
     """
     try:
+        crew = get_crew()
+        if not crew:
+            raise ValueError("No crew found.")
         if all:
-            ShortTermMemory().reset()
-            EntityMemory().reset()
-            LongTermMemory().reset()
-            TaskOutputStorageHandler().reset()
-            KnowledgeStorage().reset()
+            crew.reset_memories(command_type="all")
             click.echo("All memories have been reset.")
-        else:
-            if long:
-                LongTermMemory().reset()
-                click.echo("Long term memory has been reset.")
-            if short:
-                ShortTermMemory().reset()
-                click.echo("Short term memory has been reset.")
-            if entity:
-                EntityMemory().reset()
-                click.echo("Entity memory has been reset.")
-            if kickoff_outputs:
-                TaskOutputStorageHandler().reset()
-                click.echo("Latest Kickoff outputs stored has been reset.")
-            if knowledge:
-                KnowledgeStorage().reset()
-                click.echo("Knowledge has been reset.")
+            return
+        if not any([long, short, entity, kickoff_outputs, knowledge]):
+            click.echo(
+                "No memory type specified. Please specify at least one type to reset."
+            )
+            return
+        if long:
+            crew.reset_memories(command_type="long")
+            click.echo("Long term memory has been reset.")
+        if short:
+            crew.reset_memories(command_type="short")
+            click.echo("Short term memory has been reset.")
+        if entity:
+            crew.reset_memories(command_type="entity")
+            click.echo("Entity memory has been reset.")
+        if kickoff_outputs:
+            crew.reset_memories(command_type="kickoff_outputs")
+            click.echo("Latest Kickoff outputs stored has been reset.")
+        if knowledge:
+            crew.reset_memories(command_type="knowledge")
+            click.echo("Knowledge has been reset.")
     except subprocess.CalledProcessError as e:
         click.echo(f"An error occurred while resetting the memories: {e}", err=True)

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }] authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.13" requires-python = ">=3.10,<3.13"
dependencies = [ dependencies = [
"crewai[tools]>=0.100.0,<1.0.0" "crewai[tools]>=0.100.1,<1.0.0"
] ]
[project.scripts] [project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }] authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.13" requires-python = ">=3.10,<3.13"
dependencies = [ dependencies = [
"crewai[tools]>=0.100.0,<1.0.0", "crewai[tools]>=0.100.1,<1.0.0",
] ]
[project.scripts] [project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md" readme = "README.md"
requires-python = ">=3.10,<3.13" requires-python = ">=3.10,<3.13"
dependencies = [ dependencies = [
"crewai[tools]>=0.100.0" "crewai[tools]>=0.100.1"
] ]
[tool.crewai] [tool.crewai]

View File

@@ -9,6 +9,7 @@ import tomli
 from rich.console import Console
 from crewai.cli.constants import ENV_VARS
+from crewai.crew import Crew
 if sys.version_info >= (3, 11):
     import tomllib
@@ -247,3 +248,64 @@ def write_env_file(folder_path, env_vars):
     with open(env_file_path, "w") as file:
         for key, value in env_vars.items():
             file.write(f"{key}={value}\n")
def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
"""Get the crew instance from the crew.py file."""
try:
import importlib.util
import os
for root, _, files in os.walk("."):
if "crew.py" in files:
crew_path = os.path.join(root, "crew.py")
try:
spec = importlib.util.spec_from_file_location(
"crew_module", crew_path
)
if not spec or not spec.loader:
continue
module = importlib.util.module_from_spec(spec)
try:
sys.modules[spec.name] = module
spec.loader.exec_module(module)
for attr_name in dir(module):
attr = getattr(module, attr_name)
try:
if callable(attr) and hasattr(attr, "crew"):
crew_instance = attr().crew()
return crew_instance
except Exception as e:
print(f"Error processing attribute {attr_name}: {e}")
continue
except Exception as exec_error:
print(f"Error executing module: {exec_error}")
import traceback
print(f"Traceback: {traceback.format_exc()}")
except (ImportError, AttributeError) as e:
if require:
console.print(
f"Error importing crew from {crew_path}: {str(e)}",
style="bold red",
)
continue
break
if require:
console.print("No valid Crew instance found in crew.py", style="bold red")
raise SystemExit
return None
except Exception as e:
if require:
console.print(
f"Unexpected error while loading crew: {str(e)}", style="bold red"
)
raise SystemExit
return None

View File

@@ -195,9 +195,9 @@ class Crew(BaseModel):
     default=None,
     description="Path to the prompt json file to be used for the crew.",
 )
-output_log_file: Optional[str] = Field(
+output_log_file: Optional[Union[bool, str]] = Field(
     default=None,
-    description="output_log_file",
+    description="Path to the log file to be saved",
 )
 planning: Optional[bool] = Field(
     default=False,
@@ -309,7 +309,7 @@ class Crew(BaseModel):
 ):
     self.knowledge = Knowledge(
         sources=self.knowledge_sources,
-        embedder_config=self.embedder,
+        embedder=self.embedder,
         collection_name="crew",
     )
@@ -396,6 +396,22 @@ class Crew(BaseModel):
     return self
+@model_validator(mode="after")
+def validate_must_have_non_conditional_task(self) -> "Crew":
+    """Ensure that a crew has at least one non-conditional task."""
+    if not self.tasks:
+        return self
+    non_conditional_count = sum(
+        1 for task in self.tasks if not isinstance(task, ConditionalTask)
+    )
+    if non_conditional_count == 0:
+        raise PydanticCustomError(
+            "only_conditional_tasks",
+            "Crew must include at least one non-conditional task",
+            {},
+        )
+    return self
 @model_validator(mode="after")
 def validate_first_task(self) -> "Crew":
     """Ensure the first task is not a ConditionalTask."""
@@ -455,6 +471,8 @@ class Crew(BaseModel):
) )
return self return self
@property @property
def key(self) -> str: def key(self) -> str:
source = [agent.key for agent in self.agents] + [ source = [agent.key for agent in self.agents] + [
@@ -723,12 +741,7 @@ class Crew(BaseModel):
manager.tools = [] manager.tools = []
raise Exception("Manager agent should not have tools") raise Exception("Manager agent should not have tools")
else: else:
self.manager_llm = ( self.manager_llm = create_llm(self.manager_llm)
getattr(self.manager_llm, "model_name", None)
or getattr(self.manager_llm, "model", None)
or getattr(self.manager_llm, "deployment_name", None)
or self.manager_llm
)
manager = Agent( manager = Agent(
role=i18n.retrieve("hierarchical_manager_agent", "role"), role=i18n.retrieve("hierarchical_manager_agent", "role"),
goal=i18n.retrieve("hierarchical_manager_agent", "goal"), goal=i18n.retrieve("hierarchical_manager_agent", "goal"),
@@ -788,6 +801,7 @@ class Crew(BaseModel):
task, task_outputs, futures, task_index, was_replayed task, task_outputs, futures, task_index, was_replayed
) )
if skipped_task_output: if skipped_task_output:
task_outputs.append(skipped_task_output)
continue continue
if task.async_execution: if task.async_execution:
@@ -811,7 +825,7 @@ class Crew(BaseModel):
context=context, context=context,
tools=tools_for_task, tools=tools_for_task,
) )
task_outputs = [task_output] task_outputs.append(task_output)
self._process_task_result(task, task_output) self._process_task_result(task, task_output)
self._store_execution_log(task, task_output, task_index, was_replayed) self._store_execution_log(task, task_output, task_index, was_replayed)
@@ -832,7 +846,7 @@ class Crew(BaseModel):
task_outputs = self._process_async_tasks(futures, was_replayed) task_outputs = self._process_async_tasks(futures, was_replayed)
futures.clear() futures.clear()
previous_output = task_outputs[task_index - 1] if task_outputs else None previous_output = task_outputs[-1] if task_outputs else None
if previous_output is not None and not task.should_execute(previous_output): if previous_output is not None and not task.should_execute(previous_output):
self._logger.log( self._logger.log(
"debug", "debug",
@@ -954,11 +968,15 @@ class Crew(BaseModel):
) )
def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput: def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
if len(task_outputs) != 1: if not task_outputs:
raise ValueError( raise ValueError("No task outputs available to create crew output.")
"Something went wrong. Kickoff should return only one task output."
) # Filter out empty outputs and get the last valid one as the main output
final_task_output = task_outputs[0] valid_outputs = [t for t in task_outputs if t.raw]
if not valid_outputs:
raise ValueError("No valid task outputs available to create crew output.")
final_task_output = valid_outputs[-1]
final_string_output = final_task_output.raw final_string_output = final_task_output.raw
self._finish_execution(final_string_output) self._finish_execution(final_string_output)
token_usage = self.calculate_usage_metrics() token_usage = self.calculate_usage_metrics()
@@ -972,7 +990,7 @@ class Crew(BaseModel):
raw=final_task_output.raw, raw=final_task_output.raw,
pydantic=final_task_output.pydantic, pydantic=final_task_output.pydantic,
json_dict=final_task_output.json_dict, json_dict=final_task_output.json_dict,
tasks_output=[task.output for task in self.tasks if task.output], tasks_output=task_outputs,
token_usage=token_usage, token_usage=token_usage,
) )
@@ -1212,3 +1230,80 @@ class Crew(BaseModel):
def __repr__(self): def __repr__(self):
return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})" return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
def reset_memories(self, command_type: str) -> None:
"""Reset specific or all memories for the crew.
Args:
command_type: Type of memory to reset.
Valid options: 'long', 'short', 'entity', 'knowledge',
'kickoff_outputs', or 'all'
Raises:
ValueError: If an invalid command type is provided.
RuntimeError: If memory reset operation fails.
"""
VALID_TYPES = frozenset(
["long", "short", "entity", "knowledge", "kickoff_outputs", "all"]
)
if command_type not in VALID_TYPES:
raise ValueError(
f"Invalid command type. Must be one of: {', '.join(sorted(VALID_TYPES))}"
)
try:
if command_type == "all":
self._reset_all_memories()
else:
self._reset_specific_memory(command_type)
self._logger.log("info", f"{command_type} memory has been reset")
except Exception as e:
error_msg = f"Failed to reset {command_type} memory: {str(e)}"
self._logger.log("error", error_msg)
raise RuntimeError(error_msg) from e
def _reset_all_memories(self) -> None:
"""Reset all available memory systems."""
memory_systems = [
("short term", self._short_term_memory),
("entity", self._entity_memory),
("long term", self._long_term_memory),
("task output", self._task_output_handler),
("knowledge", self.knowledge),
]
for name, system in memory_systems:
if system is not None:
try:
system.reset()
except Exception as e:
raise RuntimeError(f"Failed to reset {name} memory") from e
def _reset_specific_memory(self, memory_type: str) -> None:
"""Reset a specific memory system.
Args:
memory_type: Type of memory to reset
Raises:
RuntimeError: If the specified memory system fails to reset
"""
reset_functions = {
"long": (self._long_term_memory, "long term"),
"short": (self._short_term_memory, "short term"),
"entity": (self._entity_memory, "entity"),
"knowledge": (self.knowledge, "knowledge"),
"kickoff_outputs": (self._task_output_handler, "task output"),
}
memory_system, name = reset_functions[memory_type]
if memory_system is None:
raise RuntimeError(f"{name} memory system is not initialized")
try:
memory_system.reset()
except Exception as e:
raise RuntimeError(f"Failed to reset {name} memory") from e

View File

@@ -67,3 +67,9 @@ class Knowledge(BaseModel):
source.add() source.add()
except Exception as e: except Exception as e:
raise e raise e
def reset(self) -> None:
if self.storage:
self.storage.reset()
else:
raise ValueError("Storage is not initialized.")

View File

@@ -1,28 +1,138 @@
from pathlib import Path from pathlib import Path
from typing import Dict, List from typing import Dict, Iterator, List, Optional, Union
from urllib.parse import urlparse
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource from pydantic import Field, field_validator
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
from crewai.utilities.logger import Logger
class ExcelKnowledgeSource(BaseFileKnowledgeSource): class ExcelKnowledgeSource(BaseKnowledgeSource):
"""A knowledge source that stores and queries Excel file content using embeddings.""" """A knowledge source that stores and queries Excel file content using embeddings."""
def load_content(self) -> Dict[Path, str]: # content is overridden to map file paths to {sheet name: CSV text}
"""Load and preprocess Excel file content."""
pd = self._import_dependencies()
_logger: Logger = Logger(verbose=True)
file_path: Optional[Union[Path, List[Path], str, List[str]]] = Field(
default=None,
description="[Deprecated] The path to the file. Use file_paths instead.",
)
file_paths: Optional[Union[Path, List[Path], str, List[str]]] = Field(
default_factory=list, description="The path to the file"
)
chunks: List[str] = Field(default_factory=list)
content: Dict[Path, Dict[str, str]] = Field(default_factory=dict)
safe_file_paths: List[Path] = Field(default_factory=list)
@field_validator("file_path", "file_paths", mode="before")
def validate_file_path(cls, v, info):
"""Validate that at least one of file_path or file_paths is provided."""
# Fail only when both file_path and file_paths are missing
if (
v is None
and info.data.get(
"file_path" if info.field_name == "file_paths" else "file_paths"
)
is None
):
raise ValueError("Either file_path or file_paths must be provided")
return v
def _process_file_paths(self) -> List[Path]:
"""Convert file_path to a list of Path objects."""
if hasattr(self, "file_path") and self.file_path is not None:
self._logger.log(
"warning",
"The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
color="yellow",
)
self.file_paths = self.file_path
if self.file_paths is None:
raise ValueError("Your source must be provided with a file_paths: []")
# Convert single path to list
path_list: List[Union[Path, str]] = (
[self.file_paths]
if isinstance(self.file_paths, (str, Path))
else list(self.file_paths)
if isinstance(self.file_paths, list)
else []
)
if not path_list:
raise ValueError(
"file_path/file_paths must be a Path, str, or a list of these types"
)
return [self.convert_to_path(path) for path in path_list]
def validate_content(self):
"""Validate the paths."""
for path in self.safe_file_paths:
if not path.exists():
self._logger.log(
"error",
f"File not found: {path}. Try adding sources to the knowledge directory. If it's inside the knowledge directory, use the relative path.",
color="red",
)
raise FileNotFoundError(f"File not found: {path}")
if not path.is_file():
self._logger.log(
"error",
f"Path is not a file: {path}",
color="red",
)
def model_post_init(self, _) -> None:
if self.file_path:
self._logger.log(
"warning",
"The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
color="yellow",
)
self.file_paths = self.file_path
self.safe_file_paths = self._process_file_paths()
self.validate_content()
self.content = self._load_content()
def _load_content(self) -> Dict[Path, Dict[str, str]]:
"""Load and preprocess Excel file content from multiple sheets.
Each sheet's content is converted to CSV format and stored.
Returns:
Dict[Path, Dict[str, str]]: A mapping of file paths to their respective sheet contents.
Raises:
ImportError: If required dependencies are missing.
FileNotFoundError: If the specified Excel file cannot be opened.
"""
pd = self._import_dependencies()
content_dict = {} content_dict = {}
for file_path in self.safe_file_paths: for file_path in self.safe_file_paths:
file_path = self.convert_to_path(file_path) file_path = self.convert_to_path(file_path)
df = pd.read_excel(file_path) with pd.ExcelFile(file_path) as xl:
content = df.to_csv(index=False) sheet_dict = {
content_dict[file_path] = content str(sheet_name): str(
pd.read_excel(xl, sheet_name).to_csv(index=False)
)
for sheet_name in xl.sheet_names
}
content_dict[file_path] = sheet_dict
return content_dict return content_dict
def convert_to_path(self, path: Union[Path, str]) -> Path:
"""Convert a path to a Path object."""
return Path(KNOWLEDGE_DIRECTORY + "/" + path) if isinstance(path, str) else path
def _import_dependencies(self): def _import_dependencies(self):
"""Dynamically import dependencies.""" """Dynamically import dependencies."""
try: try:
import openpyxl # noqa
import pandas as pd import pandas as pd
return pd return pd
@@ -38,10 +148,14 @@ class ExcelKnowledgeSource(BaseFileKnowledgeSource):
and save the embeddings. and save the embeddings.
""" """
# Convert dictionary values to a single string if content is a dictionary # Convert dictionary values to a single string if content is a dictionary
if isinstance(self.content, dict): # Updated to account for .xlsx workbooks with multiple tabs/sheets
content_str = "\n".join(str(value) for value in self.content.values()) content_str = ""
else: for value in self.content.values():
content_str = str(self.content) if isinstance(value, dict):
for sheet_value in value.values():
content_str += str(sheet_value) + "\n"
else:
content_str += str(value) + "\n"
new_chunks = self._chunk_text(content_str) new_chunks = self._chunk_text(content_str)
self.chunks.extend(new_chunks) self.chunks.extend(new_chunks)
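
A sketch of the reworked source, assuming a workbook named `sales.xlsx` sits in the knowledge directory (string paths are resolved against `KNOWLEDGE_DIRECTORY` by `convert_to_path`):

```python
from crewai.knowledge.source.excel_knowledge_source import ExcelKnowledgeSource

source = ExcelKnowledgeSource(file_paths=["sales.xlsx"])  # hypothetical workbook
# content now maps each file to {sheet name: CSV text}, one entry per tab
for path, sheets in source.content.items():
    print(path, "->", list(sheets.keys()))
```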

View File

@@ -5,15 +5,17 @@ import sys
import threading import threading
import warnings import warnings
from contextlib import contextmanager from contextlib import contextmanager
from typing import Any, Dict, List, Literal, Optional, Union, cast from typing import Any, Dict, List, Literal, Optional, Type, Union, cast
from dotenv import load_dotenv from dotenv import load_dotenv
from pydantic import BaseModel
with warnings.catch_warnings(): with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning) warnings.simplefilter("ignore", UserWarning)
import litellm import litellm
from litellm import Choices, get_supported_openai_params from litellm import Choices, get_supported_openai_params
from litellm.types.utils import ModelResponse from litellm.types.utils import ModelResponse
from litellm.utils import supports_response_schema
from crewai.utilities.exceptions.context_window_exceeding_exception import ( from crewai.utilities.exceptions.context_window_exceeding_exception import (
@@ -128,7 +130,7 @@ class LLM:
presence_penalty: Optional[float] = None, presence_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None,
logit_bias: Optional[Dict[int, float]] = None, logit_bias: Optional[Dict[int, float]] = None,
response_format: Optional[Dict[str, Any]] = None, response_format: Optional[Type[BaseModel]] = None,
seed: Optional[int] = None, seed: Optional[int] = None,
logprobs: Optional[int] = None, logprobs: Optional[int] = None,
top_logprobs: Optional[int] = None, top_logprobs: Optional[int] = None,
@@ -162,6 +164,7 @@ class LLM:
self.context_window_size = 0 self.context_window_size = 0
self.reasoning_effort = reasoning_effort self.reasoning_effort = reasoning_effort
self.additional_params = kwargs self.additional_params = kwargs
self.is_anthropic = self._is_anthropic_model(model)
litellm.drop_params = True litellm.drop_params = True
@@ -176,55 +179,88 @@ class LLM:
self.set_callbacks(callbacks) self.set_callbacks(callbacks)
self.set_env_callbacks() self.set_env_callbacks()
def _is_anthropic_model(self, model: str) -> bool:
"""Determine if the model is from Anthropic provider.
Args:
model: The model identifier string.
Returns:
bool: True if the model is from Anthropic, False otherwise.
"""
ANTHROPIC_PREFIXES = ('anthropic/', 'claude-', 'claude/')
return any(prefix in model.lower() for prefix in ANTHROPIC_PREFIXES)
def call( def call(
self, self,
messages: Union[str, List[Dict[str, str]]], messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None, tools: Optional[List[dict]] = None,
callbacks: Optional[List[Any]] = None, callbacks: Optional[List[Any]] = None,
available_functions: Optional[Dict[str, Any]] = None, available_functions: Optional[Dict[str, Any]] = None,
) -> str: ) -> Union[str, Any]:
""" """High-level LLM call method.
High-level llm call method that:
1) Accepts either a string or a list of messages Args:
2) Converts string input to the required message format messages: Input messages for the LLM.
3) Calls litellm.completion Can be a string or list of message dictionaries.
4) Handles function/tool calls if any If string, it will be converted to a single user message.
5) Returns the final text response or tool result If list, each dict must have 'role' and 'content' keys.
tools: Optional list of tool schemas for function calling.
Parameters: Each tool should define its name, description, and parameters.
- messages (Union[str, List[Dict[str, str]]]): The input messages for the LLM. callbacks: Optional list of callback functions to be executed
- If a string is provided, it will be converted into a message list with a single entry. during and after the LLM call.
- If a list of dictionaries is provided, each dictionary should have 'role' and 'content' keys. available_functions: Optional dict mapping function names to callables
- tools (Optional[List[dict]]): A list of tool schemas for function calling. that can be invoked by the LLM.
- callbacks (Optional[List[Any]]): A list of callback functions to be executed.
- available_functions (Optional[Dict[str, Any]]): A dictionary mapping function names to actual Python functions.
Returns: Returns:
- str: The final text response from the LLM or the result of a tool function call. Union[str, Any]: Either a text response from the LLM (str) or
the result of a tool function call (Any).
Raises:
TypeError: If messages format is invalid
ValueError: If response format is not supported
LLMContextLengthExceededException: If input exceeds model's context limit
Examples: Examples:
--------- # Example 1: Simple string input
# Example 1: Using a string input >>> response = llm.call("Return the name of a random city.")
response = llm.call("Return the name of a random city in the world.") >>> print(response)
print(response) "Paris"
# Example 2: Using a list of messages # Example 2: Message list with system and user messages
messages = [{"role": "user", "content": "What is the capital of France?"}] >>> messages = [
response = llm.call(messages) ... {"role": "system", "content": "You are a geography expert"},
print(response) ... {"role": "user", "content": "What is France's capital?"}
... ]
>>> response = llm.call(messages)
>>> print(response)
"The capital of France is Paris."
""" """
# Validate parameters before proceeding with the call.
self._validate_call_params()
if isinstance(messages, str): if isinstance(messages, str):
messages = [{"role": "user", "content": messages}] messages = [{"role": "user", "content": messages}]
# For O1 models, system messages are not supported.
# Convert any system messages into assistant messages.
if "o1" in self.model.lower():
for message in messages:
if message.get("role") == "system":
message["role"] = "assistant"
with suppress_warnings(): with suppress_warnings():
if callbacks and len(callbacks) > 0: if callbacks and len(callbacks) > 0:
self.set_callbacks(callbacks) self.set_callbacks(callbacks)
try: try:
# --- 1) Prepare the parameters for the completion call # --- 1) Format messages according to provider requirements
formatted_messages = self._format_messages_for_provider(messages)
# --- 2) Prepare the parameters for the completion call
params = { params = {
"model": self.model, "model": self.model,
"messages": messages, "messages": formatted_messages,
"timeout": self.timeout, "timeout": self.timeout,
"temperature": self.temperature, "temperature": self.temperature,
"top_p": self.top_p, "top_p": self.top_p,
@@ -312,6 +348,68 @@ class LLM:
logging.error(f"LiteLLM call failed: {str(e)}") logging.error(f"LiteLLM call failed: {str(e)}")
raise raise
def _format_messages_for_provider(self, messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
"""Format messages according to provider requirements.
Args:
messages: List of message dictionaries with 'role' and 'content' keys.
Can be empty or None.
Returns:
List of formatted messages according to provider requirements.
For Anthropic models, ensures first message has 'user' role.
Raises:
TypeError: If messages is None or contains invalid message format.
"""
if messages is None:
raise TypeError("Messages cannot be None")
# Validate message format first
for msg in messages:
if not isinstance(msg, dict) or "role" not in msg or "content" not in msg:
raise TypeError("Invalid message format. Each message must be a dict with 'role' and 'content' keys")
if not self.is_anthropic:
return messages
# Anthropic requires messages to start with 'user' role
if not messages or messages[0]["role"] == "system":
# If first message is system or empty, add a placeholder user message
return [{"role": "user", "content": "."}, *messages]
return messages
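
For illustration, the Anthropic normalization can be observed directly (calling a private method here purely as a sketch; the model id is only an example):

```python
from crewai import LLM

llm = LLM(model="anthropic/claude-3-5-sonnet-20240620")  # example model id
print(llm.is_anthropic)  # True, via _is_anthropic_model
print(llm._format_messages_for_provider(
    [{"role": "system", "content": "You are terse."}]
))
# -> [{'role': 'user', 'content': '.'}, {'role': 'system', 'content': 'You are terse.'}]
```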
def _get_custom_llm_provider(self) -> str:
"""
Derives the custom_llm_provider from the model string.
- For example, if the model is "openrouter/deepseek/deepseek-chat", returns "openrouter".
- If the model is "gemini/gemini-1.5-pro", returns "gemini".
- If there is no '/', defaults to "openai".
"""
if "/" in self.model:
return self.model.split("/")[0]
return "openai"
def _validate_call_params(self) -> None:
"""
Validate parameters before making a call. Currently this only checks if
a response_format is provided and whether the model supports it.
The custom_llm_provider is dynamically determined from the model:
- E.g., "openrouter/deepseek/deepseek-chat" yields "openrouter"
- "gemini/gemini-1.5-pro" yields "gemini"
- If no slash is present, "openai" is assumed.
"""
provider = self._get_custom_llm_provider()
if self.response_format is not None and not supports_response_schema(
model=self.model,
custom_llm_provider=provider,
):
raise ValueError(
f"The model {self.model} does not support response_format for provider '{provider}'. "
"Please remove response_format or use a supported model."
)
def supports_function_calling(self) -> bool: def supports_function_calling(self) -> bool:
try: try:
params = get_supported_openai_params(model=self.model) params = get_supported_openai_params(model=self.model)
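
And a sketch of the stricter `response_format`, which now takes a Pydantic model class and is checked against `supports_response_schema()` before each call:

```python
from pydantic import BaseModel
from crewai import LLM

class City(BaseModel):
    name: str
    country: str

llm = LLM(model="gpt-4o", response_format=City)
# llm.call("Name one European capital.")  # a ValueError is raised up front
# for models without response-schema support, per _validate_call_params
```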

View File

@@ -1,3 +1,7 @@
from typing import Optional
from pydantic import PrivateAttr
from crewai.memory.entity.entity_memory_item import EntityMemoryItem from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.memory import Memory from crewai.memory.memory import Memory
from crewai.memory.storage.rag_storage import RAGStorage from crewai.memory.storage.rag_storage import RAGStorage
@@ -10,13 +14,15 @@ class EntityMemory(Memory):
Inherits from the Memory class. Inherits from the Memory class.
""" """
def __init__(self, crew=None, embedder_config=None, storage=None, path=None): _memory_provider: Optional[str] = PrivateAttr()
if hasattr(crew, "memory_config") and crew.memory_config is not None:
self.memory_provider = crew.memory_config.get("provider")
else:
self.memory_provider = None
if self.memory_provider == "mem0": def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
memory_provider = crew.memory_config.get("provider")
else:
memory_provider = None
if memory_provider == "mem0":
try: try:
from crewai.memory.storage.mem0_storage import Mem0Storage from crewai.memory.storage.mem0_storage import Mem0Storage
except ImportError: except ImportError:
@@ -36,11 +42,13 @@ class EntityMemory(Memory):
path=path, path=path,
) )
) )
super().__init__(storage)
super().__init__(storage=storage)
self._memory_provider = memory_provider
def save(self, item: EntityMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory" def save(self, item: EntityMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
"""Saves an entity item into the SQLite storage.""" """Saves an entity item into the SQLite storage."""
if self.memory_provider == "mem0": if self._memory_provider == "mem0":
data = f""" data = f"""
Remember details about the following entity: Remember details about the following entity:
Name: {item.name} Name: {item.name}
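
A sketch of how the provider is selected, assuming the mem0 extra is installed (`pip install mem0ai`, per the ImportError handling above); the config shape follows `memory_config.get("provider")`:

```python
crew = Crew(
    agents=[agent],  # reusing the agent/task from the earlier sketch
    tasks=[task],
    memory=True,
    memory_config={"provider": "mem0"},  # routes both memories through Mem0Storage
)
```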

View File

@@ -17,7 +17,7 @@ class LongTermMemory(Memory):
def __init__(self, storage=None, path=None): def __init__(self, storage=None, path=None):
if not storage: if not storage:
storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage() storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
super().__init__(storage) super().__init__(storage=storage)
def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory" def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
metadata = item.metadata metadata = item.metadata

View File

@@ -1,15 +1,19 @@
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
from crewai.memory.storage.rag_storage import RAGStorage from pydantic import BaseModel
class Memory: class Memory(BaseModel):
""" """
Base class for memory, now supporting agent tags and generic metadata. Base class for memory, now supporting agent tags and generic metadata.
""" """
def __init__(self, storage: RAGStorage): embedder_config: Optional[Dict[str, Any]] = None
self.storage = storage
storage: Any
def __init__(self, storage: Any, **data: Any):
super().__init__(storage=storage, **data)
def save( def save(
self, self,

View File

@@ -1,5 +1,7 @@
from typing import Any, Dict, Optional from typing import Any, Dict, Optional
from pydantic import PrivateAttr
from crewai.memory.memory import Memory from crewai.memory.memory import Memory
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.memory.storage.rag_storage import RAGStorage from crewai.memory.storage.rag_storage import RAGStorage
@@ -14,13 +16,15 @@ class ShortTermMemory(Memory):
MemoryItem instances. MemoryItem instances.
""" """
def __init__(self, crew=None, embedder_config=None, storage=None, path=None): _memory_provider: Optional[str] = PrivateAttr()
if hasattr(crew, "memory_config") and crew.memory_config is not None:
self.memory_provider = crew.memory_config.get("provider")
else:
self.memory_provider = None
if self.memory_provider == "mem0": def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
memory_provider = crew.memory_config.get("provider")
else:
memory_provider = None
if memory_provider == "mem0":
try: try:
from crewai.memory.storage.mem0_storage import Mem0Storage from crewai.memory.storage.mem0_storage import Mem0Storage
except ImportError: except ImportError:
@@ -39,7 +43,8 @@ class ShortTermMemory(Memory):
path=path, path=path,
) )
) )
super().__init__(storage) super().__init__(storage=storage)
self._memory_provider = memory_provider
def save( def save(
self, self,
@@ -48,7 +53,7 @@ class ShortTermMemory(Memory):
agent: Optional[str] = None, agent: Optional[str] = None,
) -> None: ) -> None:
item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent) item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
if self.memory_provider == "mem0": if self._memory_provider == "mem0":
item.data = f"Remember the following insights from Agent run: {item.data}" item.data = f"Remember the following insights from Agent run: {item.data}"
super().save(value=item.data, metadata=item.metadata, agent=item.agent) super().save(value=item.data, metadata=item.metadata, agent=item.agent)

View File

@@ -13,7 +13,7 @@ class BaseRAGStorage(ABC):
self, self,
type: str, type: str,
allow_reset: bool = True, allow_reset: bool = True,
embedder_config: Optional[Any] = None, embedder_config: Optional[Dict[str, Any]] = None,
crew: Any = None, crew: Any = None,
): ):
self.type = type self.type = type

View File

@@ -430,9 +430,13 @@ class Task(BaseModel):
if self.callback: if self.callback:
self.callback(self.output) self.callback(self.output)
if self._execution_span: crew = self.agent.crew # type: ignore[union-attr]
self._telemetry.task_ended(self._execution_span, self, agent.crew) if crew and crew.task_callback and crew.task_callback != self.callback:
self._execution_span = None crew.task_callback(self.output)
if self._execution_span:
self._telemetry.task_ended(self._execution_span, self, agent.crew)
self._execution_span = None
if self.output_file: if self.output_file:
content = ( content = (
@@ -686,19 +690,32 @@ class Task(BaseModel):
return OutputFormat.PYDANTIC return OutputFormat.PYDANTIC
return OutputFormat.RAW return OutputFormat.RAW
def _save_file(self, result: Any) -> None: def _save_file(self, result: Union[Dict, str, Any]) -> None:
"""Save task output to a file. """Save task output to a file.
Note:
For cross-platform file writing, especially on Windows, consider using FileWriterTool
from the crewai_tools package:
pip install 'crewai[tools]'
from crewai_tools import FileWriterTool
Args: Args:
result: The result to save to the file. Can be a dict or any stringifiable object. result: The result to save to the file. Can be a dict or any stringifiable object.
Raises: Raises:
ValueError: If output_file is not set ValueError: If output_file is not set
RuntimeError: If there is an error writing to the file RuntimeError: If there is an error writing to the file. For cross-platform
compatibility, especially on Windows, use FileWriterTool from crewai_tools
package.
""" """
if self.output_file is None: if self.output_file is None:
raise ValueError("output_file is not set.") raise ValueError("output_file is not set.")
FILEWRITER_RECOMMENDATION = (
"For cross-platform file writing, especially on Windows, "
"use FileWriterTool from crewai_tools package."
)
try: try:
resolved_path = Path(self.output_file).expanduser().resolve() resolved_path = Path(self.output_file).expanduser().resolve()
directory = resolved_path.parent directory = resolved_path.parent
@@ -714,7 +731,12 @@ class Task(BaseModel):
else: else:
file.write(str(result)) file.write(str(result))
except (OSError, IOError) as e: except (OSError, IOError) as e:
raise RuntimeError(f"Failed to save output file: {e}") raise RuntimeError(
"\n".join([
f"Failed to save output file: {e}",
FILEWRITER_RECOMMENDATION
])
)
return None return None
def __repr__(self): def __repr__(self):
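
The cross-platform alternative the new error message points to, as a sketch; the argument names follow FileWriterTool's schema but are not verified here:

```python
# Assumes crewai_tools is installed: pip install 'crewai[tools]'
from crewai_tools import FileWriterTool

writer = FileWriterTool()
writer.run(filename="report.md", directory="output", content="# Findings")
```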

View File

@@ -7,11 +7,11 @@ from crewai.utilities import I18N
i18n = I18N() i18n = I18N()
class AddImageToolSchema(BaseModel): class AddImageToolSchema(BaseModel):
image_url: str = Field(..., description="The URL or path of the image to add") image_url: str = Field(..., description="The URL or path of the image to add")
action: Optional[str] = Field( action: Optional[str] = Field(
default=None, default=None, description="Optional context or question about the image"
description="Optional context or question about the image"
) )
@@ -36,10 +36,7 @@ class AddImageTool(BaseTool):
"image_url": { "image_url": {
"url": image_url, "url": image_url,
}, },
} },
] ]
return { return {"role": "user", "content": content}
"role": "user",
"content": content
}

View File

@@ -15,7 +15,7 @@
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```", "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```", "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
"task_with_context": "{task}\n\nThis is the context you're working with:\n{context}", "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
"expected_output": "\nThis is the expect criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.", "expected_output": "\nThis is the expected criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
"human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}", "human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
"getting_input": "This is the agent's final answer: {final_answer}\n\n", "getting_input": "This is the agent's final answer: {final_answer}\n\n",
"summarizer_system_message": "You are a helpful assistant that summarizes text.", "summarizer_system_message": "You are a helpful assistant that summarizes text.",

View File

@@ -1,5 +1,5 @@
import os import os
from typing import Any, Dict, cast from typing import Any, Dict, Optional, cast
from chromadb import Documents, EmbeddingFunction, Embeddings from chromadb import Documents, EmbeddingFunction, Embeddings
from chromadb.api.types import validate_embedding_function from chromadb.api.types import validate_embedding_function
@@ -18,11 +18,12 @@ class EmbeddingConfigurator:
"bedrock": self._configure_bedrock, "bedrock": self._configure_bedrock,
"huggingface": self._configure_huggingface, "huggingface": self._configure_huggingface,
"watson": self._configure_watson, "watson": self._configure_watson,
"custom": self._configure_custom,
} }
def configure_embedder( def configure_embedder(
self, self,
embedder_config: Dict[str, Any] | None = None, embedder_config: Optional[Dict[str, Any]] = None,
) -> EmbeddingFunction: ) -> EmbeddingFunction:
"""Configures and returns an embedding function based on the provided config.""" """Configures and returns an embedding function based on the provided config."""
if embedder_config is None: if embedder_config is None:
@@ -30,20 +31,19 @@ class EmbeddingConfigurator:
provider = embedder_config.get("provider") provider = embedder_config.get("provider")
config = embedder_config.get("config", {}) config = embedder_config.get("config", {})
model_name = config.get("model") model_name = config.get("model") if provider != "custom" else None
if isinstance(provider, EmbeddingFunction):
try:
validate_embedding_function(provider)
return provider
except Exception as e:
raise ValueError(f"Invalid custom embedding function: {str(e)}")
if provider not in self.embedding_functions: if provider not in self.embedding_functions:
raise Exception( raise Exception(
f"Unsupported embedding provider: {provider}, supported providers: {list(self.embedding_functions.keys())}" f"Unsupported embedding provider: {provider}, supported providers: {list(self.embedding_functions.keys())}"
) )
return self.embedding_functions[provider](config, model_name)
embedding_function = self.embedding_functions[provider]
return (
embedding_function(config)
if provider == "custom"
else embedding_function(config, model_name)
)
@staticmethod @staticmethod
def _create_default_embedding_function(): def _create_default_embedding_function():
@@ -64,6 +64,13 @@ class EmbeddingConfigurator:
return OpenAIEmbeddingFunction( return OpenAIEmbeddingFunction(
api_key=config.get("api_key") or os.getenv("OPENAI_API_KEY"), api_key=config.get("api_key") or os.getenv("OPENAI_API_KEY"),
model_name=model_name, model_name=model_name,
api_base=config.get("api_base", None),
api_type=config.get("api_type", None),
api_version=config.get("api_version", None),
default_headers=config.get("default_headers", None),
dimensions=config.get("dimensions", None),
deployment_id=config.get("deployment_id", None),
organization_id=config.get("organization_id", None),
) )
@staticmethod @staticmethod
@@ -78,6 +85,10 @@ class EmbeddingConfigurator:
api_type=config.get("api_type", "azure"), api_type=config.get("api_type", "azure"),
api_version=config.get("api_version"), api_version=config.get("api_version"),
model_name=model_name, model_name=model_name,
default_headers=config.get("default_headers"),
dimensions=config.get("dimensions"),
deployment_id=config.get("deployment_id"),
organization_id=config.get("organization_id"),
) )
@staticmethod @staticmethod
@@ -100,6 +111,8 @@ class EmbeddingConfigurator:
return GoogleVertexEmbeddingFunction( return GoogleVertexEmbeddingFunction(
model_name=model_name, model_name=model_name,
api_key=config.get("api_key"), api_key=config.get("api_key"),
project_id=config.get("project_id"),
region=config.get("region"),
) )
@staticmethod @staticmethod
@@ -111,6 +124,7 @@ class EmbeddingConfigurator:
return GoogleGenerativeAiEmbeddingFunction( return GoogleGenerativeAiEmbeddingFunction(
model_name=model_name, model_name=model_name,
api_key=config.get("api_key"), api_key=config.get("api_key"),
task_type=config.get("task_type"),
) )
@staticmethod @staticmethod
@@ -141,9 +155,11 @@ class EmbeddingConfigurator:
AmazonBedrockEmbeddingFunction, AmazonBedrockEmbeddingFunction,
) )
return AmazonBedrockEmbeddingFunction( # Allow custom model_name override with backwards compatibility
session=config.get("session"), kwargs = {"session": config.get("session")}
) if model_name is not None:
kwargs["model_name"] = model_name
return AmazonBedrockEmbeddingFunction(**kwargs)
@staticmethod @staticmethod
def _configure_huggingface(config, model_name): def _configure_huggingface(config, model_name):
@@ -193,3 +209,28 @@ class EmbeddingConfigurator:
raise e raise e
return WatsonEmbeddingFunction() return WatsonEmbeddingFunction()
@staticmethod
def _configure_custom(config):
custom_embedder = config.get("embedder")
if isinstance(custom_embedder, EmbeddingFunction):
try:
validate_embedding_function(custom_embedder)
return custom_embedder
except Exception as e:
raise ValueError(f"Invalid custom embedding function: {str(e)}")
elif callable(custom_embedder):
try:
instance = custom_embedder()
if isinstance(instance, EmbeddingFunction):
validate_embedding_function(instance)
return instance
raise ValueError(
"Custom embedder does not create an EmbeddingFunction instance"
)
except Exception as e:
raise ValueError(f"Error instantiating custom embedder: {str(e)}")
else:
raise ValueError(
"Custom embedder must be an instance of `EmbeddingFunction` or a callable that creates one"
)
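
A sketch of the new "custom" provider: the config carries either an `EmbeddingFunction` instance or a zero-argument callable that builds one:

```python
from chromadb import Documents, EmbeddingFunction, Embeddings

class StubEmbedder(EmbeddingFunction):
    def __call__(self, input: Documents) -> Embeddings:
        # deterministic stub vectors; a real embedder would call a model here
        return [[0.0] * 8 for _ in input]

embedder_config = {"provider": "custom", "config": {"embedder": StubEmbedder()}}
# passed through e.g. Crew(..., embedder=embedder_config)
```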

View File

@@ -1,30 +1,64 @@
import json
import os import os
import pickle import pickle
from datetime import datetime from datetime import datetime
from typing import Union
class FileHandler: class FileHandler:
"""take care of file operations, currently it only logs messages to a file""" """Handler for file operations supporting both JSON and text-based logging.
Args:
file_path (Union[bool, str]): Path to the log file or boolean flag
"""
def __init__(self, file_path): def __init__(self, file_path: Union[bool, str]):
if isinstance(file_path, bool): self._initialize_path(file_path)
def _initialize_path(self, file_path: Union[bool, str]):
if file_path is True: # File path is boolean True
self._path = os.path.join(os.curdir, "logs.txt") self._path = os.path.join(os.curdir, "logs.txt")
elif isinstance(file_path, str):
self._path = file_path elif isinstance(file_path, str): # File path is a string
if file_path.endswith((".json", ".txt")):
self._path = file_path # No modification if the file ends with .json or .txt
else:
self._path = file_path + ".txt" # Append .txt if the file doesn't end with .json or .txt
else: else:
raise ValueError("file_path must be either a boolean or a string.") raise ValueError("file_path must be a string or boolean.") # Handle the case where file_path isn't valid
def log(self, **kwargs): def log(self, **kwargs):
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S") try:
message = ( now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
f"{now}: " log_entry = {"timestamp": now, **kwargs}
+ ", ".join([f'{key}="{value}"' for key, value in kwargs.items()])
+ "\n"
)
with open(self._path, "a", encoding="utf-8") as file:
file.write(message + "\n")
if self._path.endswith(".json"):
# Append log in JSON format
with open(self._path, "a", encoding="utf-8") as file:
# If the file is empty, start with a list; else, append to it
try:
# Try reading existing content to avoid overwriting
with open(self._path, "r", encoding="utf-8") as read_file:
existing_data = json.load(read_file)
existing_data.append(log_entry)
except (json.JSONDecodeError, FileNotFoundError):
# If no valid JSON or file doesn't exist, start with an empty list
existing_data = [log_entry]
with open(self._path, "w", encoding="utf-8") as write_file:
json.dump(existing_data, write_file, indent=4)
write_file.write("\n")
else:
# Append log in plain text format
message = f"{now}: " + ", ".join([f"{key}=\"{value}\"" for key, value in kwargs.items()]) + "\n"
with open(self._path, "a", encoding="utf-8") as file:
file.write(message)
except Exception as e:
raise ValueError(f"Failed to log message: {str(e)}")
class PickleHandler: class PickleHandler:
def __init__(self, file_name: str) -> None: def __init__(self, file_name: str) -> None:
""" """

View File

@@ -1183,7 +1183,7 @@ def test_agent_max_retry_limit():
[ [
mock.call( mock.call(
{ {
"input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.", "input": "Say the word: Hi\n\nThis is the expected criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.",
"tool_names": "", "tool_names": "",
"tools": "", "tools": "",
"ask_for_human_input": True, "ask_for_human_input": True,
@@ -1191,7 +1191,7 @@ def test_agent_max_retry_limit():
), ),
mock.call( mock.call(
{ {
"input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.", "input": "Say the word: Hi\n\nThis is the expected criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.",
"tool_names": "", "tool_names": "",
"tools": "", "tools": "",
"ask_for_human_input": True, "ask_for_human_input": True,

View File

@@ -55,72 +55,83 @@ def test_train_invalid_string_iterations(train_crew, runner):
) )
@mock.patch("crewai.cli.reset_memories_command.ShortTermMemory") @mock.patch("crewai.cli.reset_memories_command.get_crew")
@mock.patch("crewai.cli.reset_memories_command.EntityMemory") def test_reset_all_memories(mock_get_crew, runner):
@mock.patch("crewai.cli.reset_memories_command.LongTermMemory") mock_crew = mock.Mock()
@mock.patch("crewai.cli.reset_memories_command.TaskOutputStorageHandler") mock_get_crew.return_value = mock_crew
def test_reset_all_memories( result = runner.invoke(reset_memories, ["-a"])
MockTaskOutputStorageHandler,
MockLongTermMemory,
MockEntityMemory,
MockShortTermMemory,
runner,
):
result = runner.invoke(reset_memories, ["--all"])
MockShortTermMemory().reset.assert_called_once()
MockEntityMemory().reset.assert_called_once()
MockLongTermMemory().reset.assert_called_once()
MockTaskOutputStorageHandler().reset.assert_called_once()
mock_crew.reset_memories.assert_called_once_with(command_type="all")
assert result.output == "All memories have been reset.\n" assert result.output == "All memories have been reset.\n"
@mock.patch("crewai.cli.reset_memories_command.ShortTermMemory") @mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_short_term_memories(MockShortTermMemory, runner): def test_reset_short_term_memories(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
result = runner.invoke(reset_memories, ["-s"]) result = runner.invoke(reset_memories, ["-s"])
MockShortTermMemory().reset.assert_called_once()
mock_crew.reset_memories.assert_called_once_with(command_type="short")
assert result.output == "Short term memory has been reset.\n" assert result.output == "Short term memory has been reset.\n"
@mock.patch("crewai.cli.reset_memories_command.EntityMemory") @mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_entity_memories(MockEntityMemory, runner): def test_reset_entity_memories(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
result = runner.invoke(reset_memories, ["-e"]) result = runner.invoke(reset_memories, ["-e"])
MockEntityMemory().reset.assert_called_once()
mock_crew.reset_memories.assert_called_once_with(command_type="entity")
assert result.output == "Entity memory has been reset.\n" assert result.output == "Entity memory has been reset.\n"
@mock.patch("crewai.cli.reset_memories_command.LongTermMemory") @mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_long_term_memories(MockLongTermMemory, runner): def test_reset_long_term_memories(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
result = runner.invoke(reset_memories, ["-l"]) result = runner.invoke(reset_memories, ["-l"])
MockLongTermMemory().reset.assert_called_once()
mock_crew.reset_memories.assert_called_once_with(command_type="long")
assert result.output == "Long term memory has been reset.\n" assert result.output == "Long term memory has been reset.\n"
@mock.patch("crewai.cli.reset_memories_command.TaskOutputStorageHandler") @mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_kickoff_outputs(MockTaskOutputStorageHandler, runner): def test_reset_kickoff_outputs(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
result = runner.invoke(reset_memories, ["-k"]) result = runner.invoke(reset_memories, ["-k"])
MockTaskOutputStorageHandler().reset.assert_called_once()
mock_crew.reset_memories.assert_called_once_with(command_type="kickoff_outputs")
assert result.output == "Latest Kickoff outputs stored has been reset.\n" assert result.output == "Latest Kickoff outputs stored has been reset.\n"
@mock.patch("crewai.cli.reset_memories_command.ShortTermMemory") @mock.patch("crewai.cli.reset_memories_command.get_crew")
@mock.patch("crewai.cli.reset_memories_command.LongTermMemory") def test_reset_multiple_memory_flags(mock_get_crew, runner):
def test_reset_multiple_memory_flags(MockShortTermMemory, MockLongTermMemory, runner): mock_crew = mock.Mock()
result = runner.invoke( mock_get_crew.return_value = mock_crew
reset_memories, result = runner.invoke(reset_memories, ["-s", "-l"])
[
"-s", # Check that reset_memories was called twice with the correct arguments
"-l", assert mock_crew.reset_memories.call_count == 2
], mock_crew.reset_memories.assert_has_calls(
[mock.call(command_type="long"), mock.call(command_type="short")]
) )
MockShortTermMemory().reset.assert_called_once()
MockLongTermMemory().reset.assert_called_once()
assert ( assert (
result.output result.output
== "Long term memory has been reset.\nShort term memory has been reset.\n" == "Long term memory has been reset.\nShort term memory has been reset.\n"
) )
@mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_knowledge(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
result = runner.invoke(reset_memories, ["--knowledge"])
mock_crew.reset_memories.assert_called_once_with(command_type="knowledge")
assert result.output == "Knowledge has been reset.\n"
def test_reset_no_memory_flags(runner): def test_reset_no_memory_flags(runner):
result = runner.invoke( result = runner.invoke(
reset_memories, reset_memories,

View File

@@ -2,7 +2,7 @@ research_task:
description: > description: >
Conduct a thorough research about {topic} Conduct a thorough research about {topic}
Make sure you find any interesting and relevant information given Make sure you find any interesting and relevant information given
the current year is 2024. the current year is 2025.
expected_output: > expected_output: >
A list with 10 bullet points of the most relevant information about {topic} A list with 10 bullet points of the most relevant information about {topic}
agent: researcher agent: researcher

View File

@@ -49,6 +49,41 @@ writer = Agent(
) )
def test_crew_with_only_conditional_tasks_raises_error():
"""Test that creating a crew with only conditional tasks raises an error."""
def condition_func(task_output: TaskOutput) -> bool:
return True
conditional1 = ConditionalTask(
description="Conditional task 1",
expected_output="Output 1",
agent=researcher,
condition=condition_func,
)
conditional2 = ConditionalTask(
description="Conditional task 2",
expected_output="Output 2",
agent=researcher,
condition=condition_func,
)
conditional3 = ConditionalTask(
description="Conditional task 3",
expected_output="Output 3",
agent=researcher,
condition=condition_func,
)
with pytest.raises(
pydantic_core._pydantic_core.ValidationError,
match="Crew must include at least one non-conditional task",
):
Crew(
agents=[researcher],
tasks=[conditional1, conditional2, conditional3],
)
def test_crew_config_conditional_requirement(): def test_crew_config_conditional_requirement():
with pytest.raises(ValueError): with pytest.raises(ValueError):
Crew(process=Process.sequential) Crew(process=Process.sequential)
@@ -556,12 +591,12 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():
_, kwargs = mock_execute_sync.call_args _, kwargs = mock_execute_sync.call_args
tools = kwargs["tools"] tools = kwargs["tools"]
assert any(isinstance(tool, TestTool) for tool in tools), ( assert any(
"TestTool should be present" isinstance(tool, TestTool) for tool in tools
) ), "TestTool should be present"
assert any("delegate" in tool.name.lower() for tool in tools), ( assert any(
"Delegation tool should be present" "delegate" in tool.name.lower() for tool in tools
) ), "Delegation tool should be present"
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@@ -620,12 +655,12 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
_, kwargs = mock_execute_sync.call_args _, kwargs = mock_execute_sync.call_args
tools = kwargs["tools"] tools = kwargs["tools"]
assert any(isinstance(tool, TestTool) for tool in new_ceo.tools), ( assert any(
"TestTool should be present" isinstance(tool, TestTool) for tool in new_ceo.tools
) ), "TestTool should be present"
assert any("delegate" in tool.name.lower() for tool in tools), ( assert any(
"Delegation tool should be present" "delegate" in tool.name.lower() for tool in tools
) ), "Delegation tool should be present"
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@@ -749,17 +784,17 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
used_tools = kwargs["tools"] used_tools = kwargs["tools"]
# Confirm AnotherTestTool is present but TestTool is not # Confirm AnotherTestTool is present but TestTool is not
assert any(isinstance(tool, AnotherTestTool) for tool in used_tools), ( assert any(
"AnotherTestTool should be present" isinstance(tool, AnotherTestTool) for tool in used_tools
) ), "AnotherTestTool should be present"
assert not any(isinstance(tool, TestTool) for tool in used_tools), ( assert not any(
"TestTool should not be present among used tools" isinstance(tool, TestTool) for tool in used_tools
) ), "TestTool should not be present among used tools"
# Confirm delegation tool(s) are present # Confirm delegation tool(s) are present
assert any("delegate" in tool.name.lower() for tool in used_tools), ( assert any(
"Delegation tool should be present" "delegate" in tool.name.lower() for tool in used_tools
) ), "Delegation tool should be present"
# Finally, make sure the agent's original tools remain unchanged # Finally, make sure the agent's original tools remain unchanged
assert len(researcher_with_delegation.tools) == 1 assert len(researcher_with_delegation.tools) == 1
@@ -1560,9 +1595,9 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
# Verify that exactly one tool was used and it was a CodeInterpreterTool # Verify that exactly one tool was used and it was a CodeInterpreterTool
assert len(used_tools) == 1, "Should have exactly one tool" assert len(used_tools) == 1, "Should have exactly one tool"
assert isinstance(used_tools[0], CodeInterpreterTool), ( assert isinstance(
"Tool should be CodeInterpreterTool" used_tools[0], CodeInterpreterTool
) ), "Tool should be CodeInterpreterTool"
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@@ -1917,6 +1952,78 @@ def test_task_callback_on_crew():
assert isinstance(args[0], TaskOutput) assert isinstance(args[0], TaskOutput)
def test_task_callback_both_on_task_and_crew():
from unittest.mock import MagicMock, patch
mock_callback_on_task = MagicMock()
mock_callback_on_crew = MagicMock()
researcher_agent = Agent(
role="Researcher",
goal="Make the best research and analysis on content about AI and AI agents",
backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
allow_delegation=False,
)
list_ideas = Task(
description="Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting.",
expected_output="Bullet point list of 5 important events.",
agent=researcher_agent,
async_execution=True,
callback=mock_callback_on_task,
)
crew = Crew(
agents=[researcher_agent],
process=Process.sequential,
tasks=[list_ideas],
task_callback=mock_callback_on_crew,
)
with patch.object(Agent, "execute_task") as execute:
execute.return_value = "ok"
crew.kickoff()
assert list_ideas.callback is not None
mock_callback_on_task.assert_called_once_with(list_ideas.output)
mock_callback_on_crew.assert_called_once_with(list_ideas.output)
def test_task_same_callback_both_on_task_and_crew():
from unittest.mock import MagicMock, patch
mock_callback = MagicMock()
researcher_agent = Agent(
role="Researcher",
goal="Make the best research and analysis on content about AI and AI agents",
backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
allow_delegation=False,
)
list_ideas = Task(
description="Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting.",
expected_output="Bullet point list of 5 important events.",
agent=researcher_agent,
async_execution=True,
callback=mock_callback,
)
crew = Crew(
agents=[researcher_agent],
process=Process.sequential,
tasks=[list_ideas],
task_callback=mock_callback,
)
with patch.object(Agent, "execute_task") as execute:
execute.return_value = "ok"
crew.kickoff()
assert list_ideas.callback is not None
mock_callback.assert_called_once_with(list_ideas.output)
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_with_custom_caching(): def test_tools_with_custom_caching():
from unittest.mock import patch from unittest.mock import patch
@@ -1989,6 +2096,210 @@ def test_tools_with_custom_caching():
assert result.raw == "3" assert result.raw == "3"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_conditional_task_uses_last_output():
"""Test that conditional tasks use the last task output for condition evaluation."""
task1 = Task(
description="First task",
expected_output="First output",
agent=researcher,
)
def condition_fails(task_output: TaskOutput) -> bool:
# This condition will never be met
return "never matches" in task_output.raw.lower()
def condition_succeeds(task_output: TaskOutput) -> bool:
# This condition will match first task's output
return "first success" in task_output.raw.lower()
conditional_task1 = ConditionalTask(
description="Second task - conditional that fails condition",
expected_output="Second output",
agent=researcher,
condition=condition_fails,
)
conditional_task2 = ConditionalTask(
description="Third task - conditional that succeeds using first task output",
expected_output="Third output",
agent=writer,
condition=condition_succeeds,
)
crew = Crew(
agents=[researcher, writer],
tasks=[task1, conditional_task1, conditional_task2],
)
# Mock outputs for tasks
mock_first = TaskOutput(
description="First task output",
raw="First success output", # Will be used by third task's condition
agent=researcher.role,
)
mock_third = TaskOutput(
description="Third task output",
raw="Third task executed", # Output when condition succeeds using first task output
agent=writer.role,
)
# Set up mocks for task execution and conditional logic
with patch.object(ConditionalTask, "should_execute") as mock_should_execute:
# First conditional fails, second succeeds
mock_should_execute.side_effect = [False, True]
with patch.object(Task, "execute_sync") as mock_execute:
mock_execute.side_effect = [mock_first, mock_third]
result = crew.kickoff()
# Verify execution behavior
assert mock_execute.call_count == 2 # Only first and third tasks execute
assert mock_should_execute.call_count == 2 # Both conditionals checked
# Verify outputs collection:
# First executed task output, followed by an automatically generated (skipped) output, then the conditional execution
assert len(result.tasks_output) == 3
assert (
result.tasks_output[0].raw == "First success output"
) # First task succeeded
assert (
result.tasks_output[1].raw == ""
) # Second task skipped (condition failed)
assert (
result.tasks_output[2].raw == "Third task executed"
) # Third task used first task's output
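One plausible reading of what this test encodes (illustrative helper name, not the framework's API): because a skipped conditional task only leaves an empty placeholder output, the next conditional task's condition is evaluated against the most recent output that was actually produced.
def _last_real_output(tasks_output):
    # Illustrative only: walk backwards past empty placeholders (raw == "")
    # left by skipped conditional tasks and return the last real output.
    for output in reversed(tasks_output):
        if output.raw:
            return output
    return None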
@pytest.mark.vcr(filter_headers=["authorization"])
def test_conditional_tasks_result_collection():
"""Test that task outputs are properly collected based on execution status."""
task1 = Task(
description="Normal task that always executes",
expected_output="First output",
agent=researcher,
)
def condition_never_met(task_output: TaskOutput) -> bool:
return "never matches" in task_output.raw.lower()
def condition_always_met(task_output: TaskOutput) -> bool:
return "success" in task_output.raw.lower()
task2 = ConditionalTask(
description="Conditional task that never executes",
expected_output="Second output",
agent=researcher,
condition=condition_never_met,
)
task3 = ConditionalTask(
description="Conditional task that always executes",
expected_output="Third output",
agent=writer,
condition=condition_always_met,
)
crew = Crew(
agents=[researcher, writer],
tasks=[task1, task2, task3],
)
# Mock outputs for different execution paths
mock_success = TaskOutput(
description="Success output",
raw="Success output", # Triggers third task's condition
agent=researcher.role,
)
mock_conditional = TaskOutput(
description="Conditional output",
raw="Conditional task executed",
agent=writer.role,
)
# Set up mocks for task execution and conditional logic
with patch.object(ConditionalTask, "should_execute") as mock_should_execute:
# First conditional fails, second succeeds
mock_should_execute.side_effect = [False, True]
with patch.object(Task, "execute_sync") as mock_execute:
mock_execute.side_effect = [mock_success, mock_conditional]
result = crew.kickoff()
# Verify execution behavior
assert mock_execute.call_count == 2 # Only first and third tasks execute
assert mock_should_execute.call_count == 2 # Both conditionals checked
# Verify task output collection:
# There should be three outputs: normal task, skipped conditional task (empty output),
# and the conditional task that executed.
assert len(result.tasks_output) == 3
assert (
result.tasks_output[0].raw == "Success output"
) # Normal task executed
assert result.tasks_output[1].raw == "" # Second task skipped
assert (
result.tasks_output[2].raw == "Conditional task executed"
) # Third task executed
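The invariant these assertions encode is that result.tasks_output carries one entry per task whether or not it ran; a skipped conditional task contributes an empty placeholder roughly like the following (hypothetical construction, the framework builds it internally):
skipped_placeholder = TaskOutput(
    description=task2.description,  # the conditional task whose condition failed
    raw="",                         # empty raw marks the skip
    agent=task2.agent.role,
)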
@pytest.mark.vcr(filter_headers=["authorization"])
def test_multiple_conditional_tasks():
"""Test that having multiple conditional tasks in sequence works correctly."""
task1 = Task(
description="Initial research task",
expected_output="Research output",
agent=researcher,
)
def condition1(task_output: TaskOutput) -> bool:
return "success" in task_output.raw.lower()
def condition2(task_output: TaskOutput) -> bool:
return "proceed" in task_output.raw.lower()
task2 = ConditionalTask(
description="First conditional task",
expected_output="Conditional output 1",
agent=writer,
condition=condition1,
)
task3 = ConditionalTask(
description="Second conditional task",
expected_output="Conditional output 2",
agent=writer,
condition=condition2,
)
crew = Crew(
agents=[researcher, writer],
tasks=[task1, task2, task3],
)
# Mock different task outputs to test conditional logic
mock_success = TaskOutput(
description="Mock success",
raw="Success and proceed output",
agent=researcher.role,
)
# Set up mocks for task execution
with patch.object(Task, "execute_sync", return_value=mock_success) as mock_execute:
result = crew.kickoff()
# Verify all tasks were executed (no IndexError)
assert mock_execute.call_count == 3
assert len(result.tasks_output) == 3
@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory():
from unittest.mock import patch
@@ -3107,9 +3418,9 @@ def test_fetch_inputs():
expected_placeholders = {"role_detail", "topic", "field"}
actual_placeholders = crew.fetch_inputs()
assert (
actual_placeholders == expected_placeholders
), f"Expected {expected_placeholders}, but got {actual_placeholders}"
def test_task_tools_preserve_code_execution_tools():
@@ -3182,20 +3493,20 @@ def test_task_tools_preserve_code_execution_tools():
used_tools = kwargs["tools"]
# Verify all expected tools are present
assert any(
isinstance(tool, TestTool) for tool in used_tools
), "Task's TestTool should be present"
assert any(
isinstance(tool, CodeInterpreterTool) for tool in used_tools
), "CodeInterpreterTool should be present"
assert any(
"delegate" in tool.name.lower() for tool in used_tools
), "Delegation tool should be present"
# Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
assert (
len(used_tools) == 4
), "Should have TestTool, CodeInterpreter, and 2 delegation tools"
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -3239,9 +3550,9 @@ def test_multimodal_flag_adds_multimodal_tools():
used_tools = kwargs["tools"]
# Check that the multimodal tool was added
assert any(
isinstance(tool, AddImageTool) for tool in used_tools
), "AddImageTool should be present when agent is multimodal"
# Verify we have exactly one tool (just the AddImageTool)
assert len(used_tools) == 1, "Should only have the AddImageTool"
@@ -3467,9 +3778,9 @@ def test_crew_guardrail_feedback_in_context():
assert len(execution_contexts) > 1, "Task should have been executed multiple times"
# Verify that the second execution included the guardrail feedback
assert (
"Output must contain the keyword 'IMPORTANT'" in execution_contexts[1]
), "Guardrail feedback should be included in retry context"
# Verify final output meets guardrail requirements
assert "IMPORTANT" in result.raw, "Final output should contain required keyword"

View File
@@ -3,6 +3,7 @@ from time import sleep
from unittest.mock import MagicMock, patch
import pytest
from pydantic import BaseModel
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.llm import LLM
@@ -205,6 +206,52 @@ def test_llm_passes_additional_params():
assert result == "Test response" assert result == "Test response"
def test_get_custom_llm_provider_openrouter():
llm = LLM(model="openrouter/deepseek/deepseek-chat")
assert llm._get_custom_llm_provider() == "openrouter"
def test_get_custom_llm_provider_gemini():
llm = LLM(model="gemini/gemini-1.5-pro")
assert llm._get_custom_llm_provider() == "gemini"
def test_get_custom_llm_provider_openai():
llm = LLM(model="gpt-4")
assert llm._get_custom_llm_provider() == "openai"
def test_validate_call_params_supported():
class DummyResponse(BaseModel):
a: int
# Patch supports_response_schema to simulate a supported model.
with patch("crewai.llm.supports_response_schema", return_value=True):
llm = LLM(
model="openrouter/deepseek/deepseek-chat", response_format=DummyResponse
)
# Should not raise any error.
llm._validate_call_params()
def test_validate_call_params_not_supported():
class DummyResponse(BaseModel):
a: int
# Patch supports_response_schema to simulate an unsupported model.
with patch("crewai.llm.supports_response_schema", return_value=False):
llm = LLM(model="gemini/gemini-1.5-pro", response_format=DummyResponse)
with pytest.raises(ValueError) as excinfo:
llm._validate_call_params()
assert "does not support response_format" in str(excinfo.value)
def test_validate_call_params_no_response_format():
# When no response_format is provided, no validation error should occur.
llm = LLM(model="gemini/gemini-1.5-pro", response_format=None)
llm._validate_call_params()
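A hedged sketch of the rule these three tests describe: validation fails only when a response_format is set on a model that supports_response_schema (which the patch targets above show crewai.llm imports from litellm) reports as unsupported.
from litellm import supports_response_schema  # per the patch target above

def _validate_call_params(model: str, response_format) -> None:
    # Sketch under the assumptions in the lead-in; the message wording matches
    # the "does not support response_format" assertion above.
    if response_format is not None and not supports_response_schema(model=model):
        raise ValueError(f"The model {model} does not support response_format.")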
@pytest.mark.vcr(filter_headers=["authorization"])
def test_o3_mini_reasoning_effort_high():
llm = LLM(
@@ -239,6 +286,79 @@ def test_o3_mini_reasoning_effort_medium():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.fixture
def anthropic_llm():
"""Fixture providing an Anthropic LLM instance."""
return LLM(model="anthropic/claude-3-sonnet")
@pytest.fixture
def system_message():
"""Fixture providing a system message."""
return {"role": "system", "content": "test"}
@pytest.fixture
def user_message():
"""Fixture providing a user message."""
return {"role": "user", "content": "test"}
def test_anthropic_message_formatting_edge_cases(anthropic_llm):
"""Test edge cases for Anthropic message formatting."""
# Test None messages
with pytest.raises(TypeError, match="Messages cannot be None"):
anthropic_llm._format_messages_for_provider(None)
# Test empty message list
formatted = anthropic_llm._format_messages_for_provider([])
assert len(formatted) == 1
assert formatted[0]["role"] == "user"
assert formatted[0]["content"] == "."
# Test invalid message format
with pytest.raises(TypeError, match="Invalid message format"):
anthropic_llm._format_messages_for_provider([{"invalid": "message"}])
def test_anthropic_model_detection():
"""Test Anthropic model detection with various formats."""
models = [
("anthropic/claude-3", True),
("claude-instant", True),
("claude/v1", True),
("gpt-4", False),
("", False),
("anthropomorphic", False), # Should not match partial words
]
for model, expected in models:
llm = LLM(model=model)
assert llm.is_anthropic == expected, f"Failed for model: {model}"
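One detection rule that satisfies all six cases, including rejecting the partial word "anthropomorphic" (an assumption; the real property may instead consult a list of provider prefixes):
def _is_anthropic(model: str) -> bool:
    # Prefix checks rather than substring search avoid false positives
    # such as "anthropomorphic".
    return model.startswith("anthropic/") or model.startswith("claude")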
def test_anthropic_message_formatting(anthropic_llm, system_message, user_message):
"""Test Anthropic message formatting with fixtures."""
# Test when first message is system
formatted = anthropic_llm._format_messages_for_provider([system_message])
assert len(formatted) == 2
assert formatted[0]["role"] == "user"
assert formatted[0]["content"] == "."
assert formatted[1] == system_message
# Test when first message is already user
formatted = anthropic_llm._format_messages_for_provider([user_message])
assert len(formatted) == 1
assert formatted[0] == user_message
# Test with empty message list
formatted = anthropic_llm._format_messages_for_provider([])
assert len(formatted) == 1
assert formatted[0]["role"] == "user"
assert formatted[0]["content"] == "."
# Test with non-Anthropic model (should not modify messages)
non_anthropic_llm = LLM(model="gpt-4")
formatted = non_anthropic_llm._format_messages_for_provider([system_message])
assert len(formatted) == 1
assert formatted[0] == system_message
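A sketch of the provider-specific massaging these tests pin down, with the checks ordered to satisfy every case above (hypothetical body; the real method lives on LLM and consults self.is_anthropic):
def _format_messages_for_provider(messages, is_anthropic: bool):
    # Sketch only. None is rejected outright; non-Anthropic models pass through.
    if messages is None:
        raise TypeError("Messages cannot be None")
    if not is_anthropic:
        return messages
    if not all(
        isinstance(m, dict) and "role" in m and "content" in m for m in messages
    ):
        raise TypeError("Invalid message format")
    # Anthropic requires the conversation to open with a user turn, so a
    # minimal "." user message is injected when it does not.
    if not messages or messages[0]["role"] != "user":
        return [{"role": "user", "content": "."}] + list(messages)
    return list(messages)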
def test_deepseek_r1_with_open_router():
if not os.getenv("OPEN_ROUTER_API_KEY"):
pytest.skip("OPEN_ROUTER_API_KEY not set; skipping test.")

View File
@@ -723,14 +723,14 @@ def test_interpolate_inputs():
)
task.interpolate_inputs_and_add_conversation_history(
inputs={"topic": "AI", "date": "2025"}
)
assert (
task.description
== "Give me a list of 5 interesting ideas about AI to explore for an article, what makes them unique and interesting."
)
assert task.expected_output == "Bullet point list of 5 interesting ideas about AI."
assert task.output_file == "/tmp/AI/output_2025.txt"
task.interpolate_inputs_and_add_conversation_history(
inputs={"topic": "ML", "date": "2025"}

uv.lock generated
View File
@@ -649,7 +649,7 @@ wheels = [
[[package]]
name = "crewai"
version = "0.100.1"
source = { editable = "." }
dependencies = [
{ name = "appdirs" },