Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-26 01:08:29 +00:00

Compare commits: DOCS/readm... to feat/cli-d... (24 commits)
| SHA1 |
|---|
| 8a13741b15 |
| 97b5d6b809 |
| 4f266980ef |
| b87bcd25bb |
| 2ff7dad61e |
| 33ace9bfe2 |
| bcdea6849f |
| 92ca4ce841 |
| 27f2682301 |
| 0c3c4fbe17 |
| 5f3fa857cc |
| 8eeca55354 |
| b5db79da12 |
| 156f59760c |
| 50746d5e8b |
| bcc050b793 |
| d4d7712164 |
| 414a9ba07e |
| 3b0286f592 |
| 5d1f655229 |
| f44d3902a4 |
| bec9a4941c |
| f0e28cd88a |
| ff5c55fd54 |
README.md (24 changes)
@@ -8,11 +8,11 @@

<h3>

[Homepage](https://www.crewai.com/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/crewAIInc/crewAI-examples) | [Discord](https://discord.com/invite/X4JWnZnxPb)
[Homepage](https://www.crewai.io/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/joaomdmoura/crewai-examples) | [Discord](https://discord.com/invite/X4JWnZnxPb)

</h3>

[](https://github.com/crewAIInc/crewAI)
[](https://github.com/joaomdmoura/crewAI)
[](https://opensource.org/licenses/MIT)

</div>
@@ -73,7 +73,6 @@ os.environ["SERPER_API_KEY"] = "Your Key" # serper.dev API key

# You can pass an optional llm attribute specifying what model you wanna use.
# It can be a local model through Ollama / LM Studio or a remote
# model like OpenAI, Mistral, Anthropic or others (https://docs.crewai.com/how-to/LLM-Connections/)
# If you don't specify a model, the default is OpenAI gpt-4o
#
# import os
# os.environ['OPENAI_MODEL_NAME'] = 'gpt-3.5-turbo'
@@ -154,12 +153,12 @@ In addition to the sequential process, you can use the hierarchical process, whi

## Examples

You can test different real life examples of AI crews in the [crewAI-examples repo](https://github.com/crewAIInc/crewAI-examples?tab=readme-ov-file):
You can test different real life examples of AI crews in the [crewAI-examples repo](https://github.com/joaomdmoura/crewAI-examples?tab=readme-ov-file):

- [Landing Page Generator](https://github.com/crewAIInc/crewAI-examples/tree/main/landing_page_generator)
- [Landing Page Generator](https://github.com/joaomdmoura/crewAI-examples/tree/main/landing_page_generator)
- [Having Human input on the execution](https://docs.crewai.com/how-to/Human-Input-on-Execution)
- [Trip Planner](https://github.com/crewAIInc/crewAI-examples/tree/main/trip_planner)
- [Stock Analysis](https://github.com/crewAIInc/crewAI-examples/tree/main/stock_analysis)
- [Trip Planner](https://github.com/joaomdmoura/crewAI-examples/tree/main/trip_planner)
- [Stock Analysis](https://github.com/joaomdmoura/crewAI-examples/tree/main/stock_analysis)

### Quick Tutorial

@@ -167,19 +166,19 @@ You can test different real life examples of AI crews in the [crewAI-examples re

### Write Job Descriptions

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting) or watch a video below:
[Check out code for this example](https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting) or watch a video below:

[](https://www.youtube.com/watch?v=u98wEMz-9to "Jobs postings")

### Trip Planner

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/trip_planner) or watch a video below:
[Check out code for this example](https://github.com/joaomdmoura/crewAI-examples/tree/main/trip_planner) or watch a video below:

[](https://www.youtube.com/watch?v=xis7rWp-hjs "Trip Planner")

### Stock Analysis

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/stock_analysis) or watch a video below:
[Check out code for this example](https://github.com/joaomdmoura/crewAI-examples/tree/main/stock_analysis) or watch a video below:

[](https://www.youtube.com/watch?v=e0Uj4yWdaAg "Stock Analysis")
@@ -191,12 +190,13 @@ Please refer to the [Connect crewAI to LLMs](https://docs.crewai.com/how-to/LLM-

## How CrewAI Compares

**CrewAI's Advantage**: CrewAI is built with production in mind. It offers the flexibility of Autogen's conversational agents and the structured process approach of ChatDev, but without the rigidity. CrewAI's processes are designed to be dynamic and adaptable, fitting seamlessly into both development and production workflows.

- **Autogen**: While Autogen does well at creating conversational agents capable of working together, it lacks an inherent concept of process. In Autogen, orchestrating agents' interactions requires additional programming, which can become complex and cumbersome as the scale of tasks grows.

- **ChatDev**: ChatDev introduced the idea of processes into the realm of AI agents, but its implementation is quite rigid. Customizations in ChatDev are limited and not geared towards production environments, which can hinder scalability and flexibility in real-world applications.

**CrewAI's Advantage**: CrewAI is built with production in mind. It offers the flexibility of Autogen's conversational agents and the structured process approach of ChatDev, but without the rigidity. CrewAI's processes are designed to be dynamic and adaptable, fitting seamlessly into both development and production workflows.

## Contribution

CrewAI is open-source and we welcome contributions. If you're looking to contribute, please:
@@ -4,11 +4,9 @@ description: Kickoff a Crew Asynchronously
---

## Introduction

CrewAI provides the ability to kickoff a crew asynchronously, allowing you to start the crew execution in a non-blocking manner. This feature is particularly useful when you want to run multiple crews concurrently or when you need to perform other tasks while the crew is executing.

## Asynchronous Crew Execution

To kickoff a crew asynchronously, use the `kickoff_async()` method. This method initiates the crew execution in a separate thread, allowing the main thread to continue executing other tasks.

### Method Signature

@@ -25,20 +23,10 @@ def kickoff_async(self, inputs: dict) -> CrewOutput:

- `CrewOutput`: An object representing the result of the crew execution.
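To make that return value concrete, here is a minimal, illustrative sketch of awaiting `kickoff_async()` and inspecting the resulting `CrewOutput`. The agent, task, and crew below are placeholders added for this sketch only; they are not part of the original diff.

```python
import asyncio

from crewai import Agent, Crew, Task

# Placeholder crew; in practice, reuse any crew you already have.
researcher = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="An analyst who writes short summaries.",
)
summary_task = Task(
    description="Summarize the topic: {topic}",
    expected_output="A one-paragraph summary.",
    agent=researcher,
)
my_crew = Crew(agents=[researcher], tasks=[summary_task])


async def main() -> None:
    # kickoff_async returns an awaitable that resolves to a CrewOutput.
    output = await my_crew.kickoff_async(inputs={"topic": "AI agents"})
    print(type(output).__name__)  # CrewOutput
    print(output.raw)             # the final text result of the crew run


asyncio.run(main())
```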
## Potential Use Cases

- **Parallel Content Generation**: Kickoff multiple independent crews asynchronously, each responsible for generating content on different topics. For example, one crew might research and draft an article on AI trends, while another crew generates social media posts about a new product launch. Each crew operates independently, allowing content production to scale efficiently.

- **Concurrent Market Research Tasks**: Launch multiple crews asynchronously to conduct market research in parallel. One crew might analyze industry trends, while another examines competitor strategies, and yet another evaluates consumer sentiment. Each crew independently completes its task, enabling faster and more comprehensive insights.

- **Independent Travel Planning Modules**: Execute separate crews to independently plan different aspects of a trip. One crew might handle flight options, another handles accommodation, and a third plans activities. Each crew works asynchronously, allowing various components of the trip to be planned simultaneously and independently for faster results.

## Example: Single Asynchronous Crew Execution

Here's an example of how to kickoff a crew asynchronously using asyncio and awaiting the result:
## Example
Here's an example of how to kickoff a crew asynchronously:

```python
import asyncio
from crewai import Crew, Agent, Task

# Create an agent with code execution enabled
@@ -61,57 +49,6 @@ analysis_crew = Crew(
    tasks=[data_analysis_task]
)

# Async function to kickoff the crew asynchronously
async def async_crew_execution():
    result = await analysis_crew.kickoff_async(inputs={"ages": [25, 30, 35, 40, 45]})
    print("Crew Result:", result)

# Run the async function
asyncio.run(async_crew_execution())
```

## Example: Multiple Asynchronous Crew Executions

In this example, we'll show how to kickoff multiple crews asynchronously and wait for all of them to complete using asyncio.gather():

```python
import asyncio
from crewai import Crew, Agent, Task

# Create an agent with code execution enabled
coding_agent = Agent(
    role="Python Data Analyst",
    goal="Analyze data and provide insights using Python",
    backstory="You are an experienced data analyst with strong Python skills.",
    allow_code_execution=True
)

# Create tasks that require code execution
task_1 = Task(
    description="Analyze the first dataset and calculate the average age of participants. Ages: {ages}",
    agent=coding_agent
)

task_2 = Task(
    description="Analyze the second dataset and calculate the average age of participants. Ages: {ages}",
    agent=coding_agent
)

# Create two crews and add tasks
crew_1 = Crew(agents=[coding_agent], tasks=[task_1])
crew_2 = Crew(agents=[coding_agent], tasks=[task_2])

# Async function to kickoff multiple crews asynchronously and wait for all to finish
async def async_multiple_crews():
    result_1 = crew_1.kickoff_async(inputs={"ages": [25, 30, 35, 40, 45]})
    result_2 = crew_2.kickoff_async(inputs={"ages": [20, 22, 24, 28, 30]})

    # Wait for both crews to finish
    results = await asyncio.gather(result_1, result_2)

    for i, result in enumerate(results, 1):
        print(f"Crew {i} Result:", result)

# Run the async function
asyncio.run(async_multiple_crews())
```
# Execute the crew asynchronously
result = analysis_crew.kickoff_async(inputs={"ages": [25, 30, 35, 40, 45]})
```
@@ -112,30 +112,30 @@ Switch between APIs and models seamlessly using environment variables, supportin

### Configuration Examples
#### FastChat
```sh
os.environ["OPENAI_API_BASE"]='http://localhost:8001/v1'
os.environ["OPENAI_MODEL_NAME"]='oh-2.5m7b-q51'
os.environ[OPENAI_API_KEY]='NA'
os.environ[OPENAI_API_BASE]="http://localhost:8001/v1"
os.environ[OPENAI_MODEL_NAME]='oh-2.5m7b-q51'
os.environ[OPENAI_API_KEY]=NA
```

#### LM Studio
Launch [LM Studio](https://lmstudio.ai) and go to the Server tab. Then select a model from the dropdown menu and wait for it to load. Once it's loaded, click the green Start Server button and use the URL, port, and API key that's shown (you can modify them). Below is an example of the default settings as of LM Studio 0.2.19:
```sh
os.environ["OPENAI_API_BASE"]='http://localhost:1234/v1'
os.environ["OPENAI_API_KEY"]='lm-studio'
os.environ[OPENAI_API_BASE]="http://localhost:1234/v1"
os.environ[OPENAI_API_KEY]="lm-studio"
```

#### Groq API
```sh
os.environ["OPENAI_API_KEY"]='your-groq-api-key'
os.environ["OPENAI_MODEL_NAME"]='llama3-8b-8192'
os.environ["OPENAI_API_BASE"]='https://api.groq.com/openai/v1'
os.environ[OPENAI_API_KEY]=your-groq-api-key
os.environ[OPENAI_MODEL_NAME]='llama3-8b-8192'
os.environ[OPENAI_API_BASE]=https://api.groq.com/openai/v1
```

#### Mistral API
```sh
os.environ["OPENAI_API_KEY"]='your-mistral-api-key'
os.environ["OPENAI_API_BASE"]='https://api.mistral.ai/v1'
os.environ["OPENAI_MODEL_NAME"]='mistral-small'
os.environ[OPENAI_API_KEY]=your-mistral-api-key
os.environ[OPENAI_API_BASE]=https://api.mistral.ai/v1
os.environ[OPENAI_MODEL_NAME]="mistral-small"
```

### Solar

@@ -143,8 +143,8 @@ os.environ["OPENAI_MODEL_NAME"]='mistral-small'
from langchain_community.chat_models.solar import SolarChat
```
```sh
os.environ["SOLAR_API_BASE"]='https://api.upstage.ai/v1/solar'
os.environ["SOLAR_API_KEY"]='your-solar-api-key'
os.environ[SOLAR_API_BASE]="https://api.upstage.ai/v1/solar"
os.environ[SOLAR_API_KEY]="your-solar-api-key"
```

# Free developer API key available here: https://console.upstage.ai/services/solar

@@ -155,7 +155,7 @@ os.environ["SOLAR_API_KEY"]='your-solar-api-key'
```python
from langchain_cohere import ChatCohere
# Initialize language model
os.environ["COHERE_API_KEY"]='your-cohere-api-key'
os.environ["COHERE_API_KEY"] = "your-cohere-api-key"
llm = ChatCohere()

# Free developer API key available here: https://cohere.com/

@@ -166,10 +166,10 @@ llm = ChatCohere()
For Azure OpenAI API integration, set the following environment variables:
```sh

os.environ["AZURE_OPENAI_DEPLOYMENT"]='Your deployment'
os.environ["OPENAI_API_VERSION"]='2023-12-01-preview'
os.environ["AZURE_OPENAI_ENDPOINT"]='Your Endpoint'
os.environ["AZURE_OPENAI_API_KEY"]='Your API Key'
os.environ[AZURE_OPENAI_DEPLOYMENT] = "Your deployment"
os.environ["OPENAI_API_VERSION"] = "2023-12-01-preview"
os.environ["AZURE_OPENAI_ENDPOINT"] = "Your Endpoint"
os.environ["AZURE_OPENAI_API_KEY"] = "<Your API Key>"
```

### Example Agent with Azure LLM
@@ -5,39 +5,24 @@ description: Understanding the telemetry data collected by CrewAI and how it con

## Telemetry

!!! note "Personal Information"
    By default, we collect no data that would be considered personal information under GDPR and other privacy regulations.
    We do collect Tool's names and Agent's roles, so be advised not to include any personal information in the tool's names or the Agent's roles.
    Because no personal information is collected, it's not necessary to worry about data residency.
    When `share_crew` is enabled, additional data is collected which may contain personal information if included by the user. Users should exercise caution when enabling this feature to ensure compliance with privacy regulations.
CrewAI utilizes anonymous telemetry to gather usage statistics with the primary goal of enhancing the library. Our focus is on improving and developing the features, integrations, and tools most utilized by our users. We don't offer a way to disable it now, but we will in the future.

CrewAI utilizes anonymous telemetry to gather usage statistics with the primary goal of enhancing the library. Our focus is on improving and developing the features, integrations, and tools most utilized by our users.
It's pivotal to understand that **NO data is collected** concerning prompts, task descriptions, agents' backstories or goals, usage of tools, API calls, responses, any data processed by the agents, or secrets and environment variables, with the exception of the conditions mentioned. When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected to provide deeper insights while respecting user privacy.

It's pivotal to understand that by default, **NO personal data is collected** concerning prompts, task descriptions, agents' backstories or goals, usage of tools, API calls, responses, any data processed by the agents, or secrets and environment variables.
When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected to provide deeper insights. This expanded data collection may include personal information if users have incorporated it into their crews or tasks. Users should carefully consider the content of their crews and tasks before enabling `share_crew`. Users can disable telemetry by setting the environment variable OTEL_SDK_DISABLED to true.
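As a concrete illustration of the opt-out mentioned above, the variable can be set in the shell (`export OTEL_SDK_DISABLED=true`) or early in the program, before crewAI's telemetry is initialized. This is a minimal sketch, not taken from the original document; OTEL_SDK_DISABLED is the standard OpenTelemetry SDK switch referenced above.

```python
import os

# Disable OpenTelemetry-based telemetry before crewai sets up its tracer.
os.environ["OTEL_SDK_DISABLED"] = "true"

from crewai import Agent, Crew, Task  # imported after the variable is set
```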
### Data Explanation:

| Defaulted | Data | Reason and Specifics |
|-----------|------|----------------------|
| Yes | CrewAI and Python Version | Tracks software versions. Example: CrewAI v1.2.3, Python 3.8.10. No personal data. |
| Yes | Crew Metadata | Includes: randomly generated key and ID, process type (e.g., 'sequential', 'parallel'), boolean flag for memory usage (true/false), count of tasks, count of agents. All non-personal. |
| Yes | Agent Data | Includes: randomly generated key and ID, role name (should not include personal info), boolean settings (verbose, delegation enabled, code execution allowed), max iterations, max RPM, max retry limit, LLM info (see LLM Attributes), list of tool names (should not include personal info). No personal data. |
| Yes | Task Metadata | Includes: randomly generated key and ID, boolean execution settings (async_execution, human_input), associated agent's role and key, list of tool names. All non-personal. |
| Yes | Tool Usage Statistics | Includes: tool name (should not include personal info), number of usage attempts (integer), LLM attributes used. No personal data. |
| Yes | Test Execution Data | Includes: crew's randomly generated key and ID, number of iterations, model name used, quality score (float), execution time (in seconds). All non-personal. |
| Yes | Task Lifecycle Data | Includes: creation and execution start/end times, crew and task identifiers. Stored as spans with timestamps. No personal data. |
| Yes | LLM Attributes | Includes: name, model_name, model, top_k, temperature, and class name of the LLM. All technical, non-personal data. |
| Yes | Crew Deployment attempt using crewAI CLI | Includes: the fact that a deploy is being made, the crew ID, and whether logs are being pulled; no other data. |
| No | Agent's Expanded Data | Includes: goal description, backstory text, i18n prompt file identifier. Users should ensure no personal info is included in text fields. |
| No | Detailed Task Information | Includes: task description, expected output description, context references. Users should ensure no personal info is included in these fields. |
| No | Environment Information | Includes: platform, release, system, version, and CPU count. Example: 'Windows 10', 'x86_64'. No personal data. |
| No | Crew and Task Inputs and Outputs | Includes: input parameters and output results as non-identifiable data. Users should ensure no personal info is included. |
| No | Comprehensive Crew Execution Data | Includes: detailed logs of crew operations, all agents and tasks data, final output. All non-personal and technical in nature. |

Note: "No" in the "Defaulted" column indicates that this data is only collected when `share_crew` is set to `true`.
### Data Collected Includes:
- **Version of CrewAI**: Assessing the adoption rate of our latest version helps us understand user needs and guide our updates.
- **Python Version**: Identifying the Python versions our users operate with assists in prioritizing our support efforts for these versions.
- **General OS Information**: Details like the number of CPUs and the operating system type (macOS, Windows, Linux) enable us to focus our development on the most used operating systems and explore the potential for OS-specific features.
- **Number of Agents and Tasks in a Crew**: Ensures our internal testing mirrors real-world scenarios, helping us guide users towards best practices.
- **Crew Process Utilization**: Understanding how crews are utilized aids in directing our development focus.
- **Memory and Delegation Use by Agents**: Insights into how these features are used help evaluate their effectiveness and future.
- **Task Execution Mode**: Knowing whether tasks are executed in parallel or sequentially influences our emphasis on enhancing parallel execution capabilities.
- **Language Model Utilization**: Supports our goal to improve support for the most popular languages among our users.
- **Roles of Agents within a Crew**: Understanding the various roles agents play aids in crafting better tools, integrations, and examples.
- **Tool Usage**: Identifying which tools are most frequently used allows us to prioritize improvements in those areas.
### Opt-In Further Telemetry Sharing
Users can choose to share their complete telemetry data by enabling the `share_crew` attribute to `True` in their crew configurations. Enabling `share_crew` results in the collection of detailed crew and task execution data, including `goal`, `backstory`, `context`, and `output` of tasks. This enables a deeper insight into usage patterns.
Users can choose to share their complete telemetry data by enabling the `share_crew` attribute to `True` in their crew configurations. Enabling `share_crew` results in the collection of detailed crew and task execution data, including `goal`, `backstory`, `context`, and `output` of tasks. This enables a deeper insight into usage patterns while respecting the user's choice to share.

!!! warning "Potential Personal Information"
    If you enable `share_crew`, the collected data may include personal information if it has been incorporated into crew configurations, task descriptions, or outputs. Users should carefully review their data and ensure compliance with GDPR and other applicable privacy regulations before enabling this feature.
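For illustration, opting in might look like the following minimal sketch. The agent and task definitions are placeholders added here and are not part of the original document; only `share_crew=True` is the setting being demonstrated.

```python
from crewai import Agent, Crew, Task

analyst = Agent(
    role="Research Analyst",
    goal="Summarize a topic",
    backstory="Writes concise research summaries.",
)
summary = Task(
    description="Summarize the topic: {topic}",
    expected_output="A short summary.",
    agent=analyst,
)

# share_crew=True opts this crew into the expanded telemetry described above.
crew = Crew(agents=[analyst], tasks=[summary], share_crew=True)
```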
### Updates and Revisions
We are committed to maintaining the accuracy and transparency of our documentation. Regular reviews and updates are performed to ensure our documentation accurately reflects the latest developments of our codebase and telemetry practices. Users are encouraged to review this section for the most current information on our data collection practices and how they contribute to the improvement of CrewAI.

@@ -2,8 +2,8 @@ site_name: crewAI
site_author: crewAI, Inc
site_description: Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks.
repo_name: crewAI
repo_url: https://github.com/crewAIInc/crewAI
site_url: https://docs.crewai.com
repo_url: https://github.com/joaomdmoura/crewai/
site_url: https://crewai.com
edit_uri: edit/main/docs/
copyright: Copyright © 2024 crewAI, Inc

@@ -1,6 +1,6 @@
[tool.poetry]
name = "crewai"
version = "0.54.0"
version = "0.51.1"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
authors = ["Joao Moura <joao@crewai.com>"]
readme = "README.md"
@@ -117,21 +117,16 @@ class Agent(BaseAgent):
    def post_init_setup(self):
        self.agent_ops_agent_name = self.role

        # Different llms store the model name in different attributes
        model_name = getattr(self.llm, "model_name", None) or getattr(
            self.llm, "deployment_name", None
        )

        if model_name:
            self._setup_llm_callbacks(model_name)
        if hasattr(self.llm, "model_name"):
            self._setup_llm_callbacks()

        if not self.agent_executor:
            self._setup_agent_executor()

        return self

    def _setup_llm_callbacks(self, model_name: str):
        token_handler = TokenCalcHandler(model_name, self._token_process)
    def _setup_llm_callbacks(self):
        token_handler = TokenCalcHandler(self.llm.model_name, self._token_process)

        if not isinstance(self.llm.callbacks, list):
            self.llm.callbacks = []

@@ -2,5 +2,3 @@ from .cache.cache_handler import CacheHandler
from .executor import CrewAgentExecutor
from .parser import CrewAgentParser
from .tools_handler import ToolsHandler

__all__ = ["CacheHandler", "CrewAgentExecutor", "CrewAgentParser", "ToolsHandler"]
src/crewai/agents/cache/__init__.py (2 changes, vendored)

@@ -1,3 +1 @@
from .cache_handler import CacheHandler

__all__ = ["CacheHandler"]

@@ -2,8 +2,6 @@ from os import getenv

import requests

from crewai.cli.deploy.utils import get_crewai_version


class CrewAPI:
    """
@@ -15,7 +13,6 @@ class CrewAPI:
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
            "User-Agent": f"CrewAI-CLI/{get_crewai_version()}",
        }
        self.base_url = getenv(
            "CREWAI_BASE_URL", "https://dev.crewai.com/crewai_plus/api/v1/crews"
@@ -2,7 +2,6 @@ from typing import Any, Dict, List, Optional

from rich.console import Console

from crewai.telemetry import Telemetry
from .api import CrewAPI
from .utils import (
    fetch_and_json_env_file,
@@ -24,13 +23,8 @@ class DeployCommand:
        Initialize the DeployCommand with project name and API client.
        """
        try:
            self._telemetry = Telemetry()
            self._telemetry.set_tracer()
            access_token = get_auth_token()
        except Exception:
            self._deploy_signup_error_span = self._telemetry.deploy_signup_error_span(
                self
            )
            console.print(
                "Please sign up/login to CrewAI+ before using the CLI.",
                style="bold red",
@@ -39,10 +33,6 @@ class DeployCommand:
            raise SystemExit

        self.project_name = get_project_name()
        if self.project_name is None:
            console.print("No project name found. Please ensure your project has a valid pyproject.toml file.", style="bold red")
            raise SystemExit

        self.client = CrewAPI(api_key=access_token)

    def _handle_error(self, json_response: Dict[str, Any]) -> None:
@@ -100,7 +90,6 @@ class DeployCommand:
        Args:
            uuid (Optional[str]): The UUID of the crew to deploy.
        """
        self._start_deployment_span = self._telemetry.start_deployment_span(self, uuid)
        console.print("Starting deployment...", style="bold blue")
        if uuid:
            response = self.client.deploy_by_uuid(uuid)
@@ -120,18 +109,10 @@ class DeployCommand:
        """
        Create a new crew deployment.
        """
        self._create_crew_deployment_span = self._telemetry.create_crew_deployment_span(
            self
        )
        console.print("Creating deployment...", style="bold blue")
        env_vars = fetch_and_json_env_file()
        remote_repo_url = get_git_remote_url()

        if remote_repo_url is None:
            console.print("No remote repository URL found.", style="bold red")
            console.print("Please ensure your project has a valid remote repository.", style="yellow")
            return

        self._confirm_input(env_vars, remote_repo_url)
        payload = self._create_payload(env_vars, remote_repo_url)

@@ -266,9 +247,6 @@ class DeployCommand:
            uuid (Optional[str]): The UUID of the crew to get logs for.
            log_type (str): The type of logs to retrieve (default: "deployment").
        """
        self._get_crew_logs_span = self._telemetry.get_crew_logs_span(
            self, uuid, log_type
        )
        console.print(f"Fetching {log_type} logs...", style="bold blue")

        if uuid:
@@ -291,7 +269,6 @@ class DeployCommand:
        Args:
            uuid (Optional[str]): The UUID of the crew to remove.
        """
        self._remove_crew_span = self._telemetry.remove_crew_span(self, uuid)
        console.print("Removing deployment...", style="bold blue")

        if uuid:
@@ -1,46 +1,12 @@
import sys
import re
import subprocess

from rich.console import Console
import tomllib

from ..authentication.utils import TokenManager

console = Console()


if sys.version_info >= (3, 11):
    import tomllib


# Drop the simple_toml_parser when we move to python3.11
def simple_toml_parser(content):
    result = {}
    current_section = result
    for line in content.split('\n'):
        line = line.strip()
        if line.startswith('[') and line.endswith(']'):
            # New section
            section = line[1:-1].split('.')
            current_section = result
            for key in section:
                current_section = current_section.setdefault(key, {})
        elif '=' in line:
            key, value = line.split('=', 1)
            key = key.strip()
            value = value.strip().strip('"')
            current_section[key] = value
    return result


def parse_toml(content):
    if sys.version_info >= (3, 11):
        return tomllib.loads(content)
    else:
        return simple_toml_parser(content)


def get_git_remote_url() -> str | None:
def get_git_remote_url() -> str:
    """Get the Git repository's remote URL."""
    try:
        # Run the git remote -v command
@@ -57,22 +23,21 @@ def get_git_remote_url() -> str | None:
        if matches:
            return matches[0]  # Return the first match (origin URL)
        else:
            console.print("No origin remote found.", style="bold red")
            print("No origin remote found.")
            return "No remote URL found"

    except subprocess.CalledProcessError as e:
        console.print(f"Error running trying to fetch the Git Repository: {e}", style="bold red")
        return f"Error running trying to fetch the Git Repository: {e}"
    except FileNotFoundError:
        console.print("Git command not found. Make sure Git is installed and in your PATH.", style="bold red")

    return None
        return "Git command not found. Make sure Git is installed and in your PATH."


def get_project_name(pyproject_path: str = "pyproject.toml") -> str | None:
def get_project_name(pyproject_path: str = "pyproject.toml"):
    """Get the project name from the pyproject.toml file."""
    try:
        # Read the pyproject.toml file
        with open(pyproject_path, "r") as f:
            pyproject_content = parse_toml(f.read())
        with open(pyproject_path, "rb") as f:
            pyproject_content = tomllib.load(f)

        # Extract the project name
        project_name = pyproject_content["tool"]["poetry"]["name"]
@@ -86,39 +51,36 @@ def get_project_name(pyproject_path: str = "pyproject.toml") -> str | None:
        print(f"Error: {pyproject_path} not found.")
    except KeyError:
        print(f"Error: {pyproject_path} is not a valid pyproject.toml file.")
    except tomllib.TOMLDecodeError if sys.version_info >= (3, 11) else Exception as e:  # type: ignore
        print(
            f"Error: {pyproject_path} is not a valid TOML file."
            if sys.version_info >= (3, 11)
            else f"Error reading the pyproject.toml file: {e}"
        )
    except tomllib.TOMLDecodeError:
        print(f"Error: {pyproject_path} is not a valid TOML file.")
    except Exception as e:
        print(f"Error reading the pyproject.toml file: {e}")

    return None


def get_crewai_version(poetry_lock_path: str = "poetry.lock") -> str:
    """Get the version number of crewai from the poetry.lock file."""
def get_crewai_version(pyproject_path: str = "pyproject.toml") -> str:
    """Get the version number of crewai from the pyproject.toml file."""
    try:
        with open(poetry_lock_path, "r") as f:
            lock_content = f.read()
        # Read the pyproject.toml file
        with open("pyproject.toml", "rb") as f:
            pyproject_content = tomllib.load(f)

        match = re.search(
            r'\[\[package\]\]\s*name\s*=\s*"crewai"\s*version\s*=\s*"([^"]+)"',
            lock_content,
            re.DOTALL,
        )
        if match:
            return match.group(1)
        else:
            print("crewai package not found in poetry.lock")
            return "no-version-found"
        # Extract the version number of crewai
        crewai_version = pyproject_content["tool"]["poetry"]["dependencies"]["crewai"][
            "version"
        ]

        return crewai_version

    except FileNotFoundError:
        print(f"Error: {poetry_lock_path} not found.")
        print(f"Error: {pyproject_path} not found.")
    except KeyError:
        print(f"Error: {pyproject_path} is not a valid pyproject.toml file.")
    except tomllib.TOMLDecodeError:
        print(f"Error: {pyproject_path} is not a valid TOML file.")
    except Exception as e:
        print(f"Error reading the poetry.lock file: {e}")
        print(f"Error reading the pyproject.toml file: {e}")

    return "no-version-found"
@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]

[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = ">=0.54.0,<1.0.0" }
crewai = { extras = ["tools"], version = ">=0.51.0,<1.0.0" }


[tool.poetry.scripts]

@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]

[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = ">=0.54.0,<1.0.0" }
crewai = { extras = ["tools"], version = ">=0.51.0,<1.0.0" }
asyncio = "*"

[tool.poetry.scripts]

@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]

[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = ">=0.54.0,<1.0.0" }
crewai = { extras = ["tools"], version = ">=0.51.0,<1.0.0" }


[tool.poetry.scripts]
@@ -1,3 +1 @@
from .crew_output import CrewOutput

__all__ = ["CrewOutput"]

@@ -1,5 +1,3 @@
from .entity.entity_memory import EntityMemory
from .long_term.long_term_memory import LongTermMemory
from .short_term.short_term_memory import ShortTermMemory

__all__ = ["EntityMemory", "LongTermMemory", "ShortTermMemory"]

@@ -5,14 +5,13 @@ import os
import shutil
from typing import Any, Dict, List, Optional

from crewai.memory.storage.interface import Storage
from crewai.utilities.paths import db_storage_path
from embedchain import App
from embedchain.llm.base import BaseLlm
from embedchain.models.data_type import DataType
from embedchain.vectordb.chroma import InvalidDimensionException

from crewai.memory.storage.interface import Storage
from crewai.utilities.paths import db_storage_path


@contextlib.contextmanager
def suppress_logging(
@@ -78,12 +77,12 @@ class RAGStorage(Storage):
            self.app.llm = FakeLLM()
        if allow_reset:
            self.app.reset()

    def _sanitize_role(self, role: str) -> str:
        """
        Sanitizes agent roles to ensure valid directory names.
        """
        return role.replace("\n", "").replace(" ", "_").replace("/", "_")
        return role.replace('\n', '').replace(' ', '_').replace('/', '_')

    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
        self._generate_embedding(value, metadata)
@@ -1,5 +1,3 @@
from crewai.pipeline.pipeline import Pipeline
from crewai.pipeline.pipeline_kickoff_result import PipelineKickoffResult
from crewai.pipeline.pipeline_output import PipelineOutput

__all__ = ["Pipeline", "PipelineKickoffResult", "PipelineOutput"]

@@ -1,3 +1 @@
from crewai.routers.router import Router

__all__ = ["Router"]

@@ -273,9 +273,7 @@ class Task(BaseModel):
        content = (
            json_output
            if json_output
            else pydantic_output.model_dump_json()
            if pydantic_output
            else result
            else pydantic_output.model_dump_json() if pydantic_output else result
        )
        self._save_file(content)

@@ -1,3 +1 @@
from .telemetry import Telemetry

__all__ = ["Telemetry"]
@@ -4,7 +4,7 @@ import asyncio
import json
import os
import platform
from typing import TYPE_CHECKING, Any, Optional
from typing import TYPE_CHECKING, Any

import pkg_resources
from opentelemetry import trace
@@ -28,6 +28,18 @@ class Telemetry:
    agents backstories or goals nor responses or any data that is being
    processed by the agents, nor any secrets and env vars.

    Data collected includes:
    - Version of crewAI
    - Version of Python
    - General OS (e.g. number of CPUs, macOS/Windows/Linux)
    - Number of agents and tasks in a crew
    - Crew Process being used
    - If Agents are using memory or allowing delegation
    - If Tasks are being executed in parallel or sequentially
    - Language model being used
    - Roles of agents in a crew
    - Tools names available

    Users can opt-in to sharing more complete data using the `share_crew`
    attribute in the Crew class.
    """
@@ -102,17 +114,10 @@ class Telemetry:
                        "max_iter": agent.max_iter,
                        "max_rpm": agent.max_rpm,
                        "i18n": agent.i18n.prompt_file,
                        "function_calling_llm": json.dumps(
                            self._safe_llm_attributes(
                                agent.function_calling_llm
                            )
                        ),
                        "llm": json.dumps(
                            self._safe_llm_attributes(agent.llm)
                        ),
                        "delegation_enabled?": agent.allow_delegation,
                        "allow_code_execution?": agent.allow_code_execution,
                        "max_retry_limit": agent.max_retry_limit,
                        "tools_names": [
                            tool.name.casefold()
                            for tool in agent.tools or []
@@ -160,62 +165,7 @@ class Telemetry:
                self._add_attribute(
                    span, "crew_inputs", json.dumps(inputs) if inputs else None
                )
            else:
                self._add_attribute(
                    span,
                    "crew_agents",
                    json.dumps(
                        [
                            {
                                "key": agent.key,
                                "id": str(agent.id),
                                "role": agent.role,
                                "verbose?": agent.verbose,
                                "max_iter": agent.max_iter,
                                "max_rpm": agent.max_rpm,
                                "function_calling_llm": json.dumps(
                                    self._safe_llm_attributes(
                                        agent.function_calling_llm
                                    )
                                ),
                                "llm": json.dumps(
                                    self._safe_llm_attributes(agent.llm)
                                ),
                                "delegation_enabled?": agent.allow_delegation,
                                "allow_code_execution?": agent.allow_code_execution,
                                "max_retry_limit": agent.max_retry_limit,
                                "tools_names": [
                                    tool.name.casefold()
                                    for tool in agent.tools or []
                                ],
                            }
                            for agent in crew.agents
                        ]
                    ),
                )
                self._add_attribute(
                    span,
                    "crew_tasks",
                    json.dumps(
                        [
                            {
                                "key": task.key,
                                "id": str(task.id),
                                "async_execution?": task.async_execution,
                                "human_input?": task.human_input,
                                "agent_role": task.agent.role
                                if task.agent
                                else "None",
                                "agent_key": task.agent.key if task.agent else None,
                                "tools_names": [
                                    tool.name.casefold()
                                    for tool in task.tools or []
                                ],
                            }
                            for task in crew.tasks
                        ]
                    ),
                )

            span.set_status(Status(StatusCode.OK))
            span.end()
        except Exception:
@@ -399,63 +349,6 @@ class Telemetry:
            except Exception:
                pass

    def deploy_signup_error_span(self):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Deploy Signup Error")
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def start_deployment_span(self, uuid: Optional[str] = None):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Start Deployment")
                if uuid:
                    self._add_attribute(span, "uuid", uuid)
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def create_crew_deployment_span(self):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Create Crew Deployment")
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Get Crew Logs")
                self._add_attribute(span, "log_type", log_type)
                if uuid:
                    self._add_attribute(span, "uuid", uuid)
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def remove_crew_span(self, uuid: Optional[str] = None):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Remove Crew")
                if uuid:
                    self._add_attribute(span, "uuid", uuid)
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def crew_execution_span(self, crew: Crew, inputs: dict[str, Any] | None):
        """Records the complete execution of a crew.
        This is only collected if the user has opted-in to share the crew.
@@ -569,7 +462,7 @@ class Telemetry:
            pass

    def _safe_llm_attributes(self, llm):
        attributes = ["name", "model_name", "model", "top_k", "temperature"]
        attributes = ["name", "model_name", "base_url", "model", "top_k", "temperature"]
        if llm:
            safe_attributes = {k: v for k, v in vars(llm).items() if k in attributes}
            safe_attributes["class"] = llm.__class__.__name__
@@ -1,6 +1,5 @@
import re


class YamlParser:
    @staticmethod
    def parse(file):
@@ -17,9 +16,7 @@ class YamlParser:

        # Replace single { and } with doubled ones, while leaving already doubled ones intact and the other special characters {# and {%
        modified_content = re.sub(r"(?<!\{){(?!\{)(?!\#)(?!\%)", "{{", content)
        modified_content = re.sub(
            r"(?<!\})(?<!\%)(?<!\#)\}(?!})", "}}", modified_content
        )
        modified_content = re.sub(r"(?<!\})(?<!\%)(?<!\#)\}(?!})", "}}", modified_content)

        # Check for 'context:' not followed by '[' and raise an error
        if re.search(r"context:(?!\s*\[)", modified_content):
@@ -17,7 +17,6 @@ class TestCrewAPI(unittest.TestCase):
            {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
                "User-Agent": "CrewAI-CLI/no-version-found"
            },
        )

@@ -1,10 +1,9 @@
import unittest
from io import StringIO
from unittest.mock import MagicMock, patch
import sys

from crewai.cli.deploy.main import DeployCommand
from crewai.cli.deploy.utils import parse_toml


class TestDeployCommand(unittest.TestCase):
    @patch("crewai.cli.deploy.main.get_auth_token")
@@ -152,68 +151,3 @@ class TestDeployCommand(unittest.TestCase):
        self.assertIn(
            "Crew 'test_project' removed successfully", fake_out.getvalue()
        )

    @unittest.skipIf(sys.version_info < (3, 11), "Requires Python 3.11+")
    def test_parse_toml_python_311_plus(self):
        toml_content = """
[tool.poetry]
name = "test_project"
version = "0.1.0"

[tool.poetry.dependencies]
python = "^3.11"
crewai = { extras = ["tools"], version = ">=0.51.0,<1.0.0" }
"""
        parsed = parse_toml(toml_content)
        self.assertEqual(parsed['tool']['poetry']['name'], 'test_project')

    @patch('builtins.open', new_callable=unittest.mock.mock_open, read_data="""
[tool.poetry]
name = "test_project"
version = "0.1.0"

[tool.poetry.dependencies]
python = "^3.10"
crewai = { extras = ["tools"], version = ">=0.51.0,<1.0.0" }
""")
    def test_get_project_name_python_310(self, mock_open):
        from crewai.cli.deploy.utils import get_project_name
        project_name = get_project_name()
        self.assertEqual(project_name, 'test_project')

    @unittest.skipIf(sys.version_info < (3, 11), "Requires Python 3.11+")
    @patch('builtins.open', new_callable=unittest.mock.mock_open, read_data="""
[tool.poetry]
name = "test_project"
version = "0.1.0"

[tool.poetry.dependencies]
python = "^3.11"
crewai = { extras = ["tools"], version = ">=0.51.0,<1.0.0" }
""")
    def test_get_project_name_python_311_plus(self, mock_open):
        from crewai.cli.deploy.utils import get_project_name
        project_name = get_project_name()
        self.assertEqual(project_name, 'test_project')

    @patch('builtins.open', new_callable=unittest.mock.mock_open, read_data="""
[[package]]
name = "crewai"
version = "0.51.1"
description = "Some description"
category = "main"
optional = false
python-versions = ">=3.10,<4.0"
""")
    def test_get_crewai_version(self, mock_open):
        from crewai.cli.deploy.utils import get_crewai_version
        version = get_crewai_version()
        self.assertEqual(version, '0.51.1')

    @patch('builtins.open', side_effect=FileNotFoundError)
    def test_get_crewai_version_file_not_found(self, mock_open):
        from crewai.cli.deploy.utils import get_crewai_version
        with patch('sys.stdout', new=StringIO()) as fake_out:
            version = get_crewai_version()
            self.assertEqual(version, 'no-version-found')
            self.assertIn("Error: poetry.lock not found.", fake_out.getvalue())

@@ -113,9 +113,7 @@ def mock_router_factory(mock_crew_factory):
            (
                "route1"
                if x.get("score", 0) > 80
                else "route2"
                if x.get("score", 0) > 50
                else "default"
                else "route2" if x.get("score", 0) > 50 else "default"
            ),
        )
    )