Compare commits


49 Commits

Author SHA1 Message Date
Lorenze Jay c0ad4576e2 Merge branch 'main' of github.com:crewAIInc/crewAI into knowledge 2024-11-20 15:36:40 -08:00
Lorenze Jay 6359b64d22 added docstrings and type hints for cli 2024-11-20 15:36:12 -08:00
Lorenze Jay 9329119f76 clearer docs 2024-11-20 14:05:15 -08:00
Lorenze Jay 38c0d61b11 more fixes 2024-11-20 14:02:12 -08:00
Lorenze Jay 8564f5551f rm print 2024-11-20 13:49:58 -08:00
Lorenze Jay 8a5404275f linted 2024-11-20 13:48:11 -08:00
Lorenze Jay 52189a46bc more docs 2024-11-20 13:43:08 -08:00
Lorenze Jay 44ab749fda improvements from review 2024-11-20 13:32:00 -08:00
Lorenze Jay 3c4504bd4f better docs 2024-11-20 13:31:13 -08:00
Lorenze Jay 23276cbd76 adding docs 2024-11-19 18:31:09 -08:00
Lorenze Jay fe18da5e11 fix 2024-11-19 18:22:05 -08:00
Lorenze Jay 76da972ce9 put a flag 2024-11-19 17:42:44 -08:00
Lorenze Jay 4663997b4c verbose run 2024-11-19 17:31:53 -08:00
Lorenze Jay b185b9e289 linted 2024-11-19 17:29:06 -08:00
Lorenze Jay 787f2eaa7c mock knowledge query to not spin up db 2024-11-19 17:27:17 -08:00
Lorenze Jay e7d816fb2a Merge branch 'main' of github.com:crewAIInc/crewAI into knowledge 2024-11-19 15:09:33 -08:00
Lorenze Jay 8373c9b521 linted 2024-11-19 14:50:26 -08:00
Lorenze Jay ec2fe6ff91 just mocks 2024-11-19 14:48:00 -08:00
Lorenze Jay 58bf2d57f7 added extra cassette 2024-11-19 14:16:22 -08:00
Lorenze Jay 705ee16c1c type check fixes 2024-11-19 12:06:29 -08:00
Lorenze Jay 0c5b6f2a93 mypysrc fixes 2024-11-19 12:02:06 -08:00
Lorenze Jay 914067df37 fixed text_file_knowledge 2024-11-19 11:39:18 -08:00
Lorenze Jay de742c827d improvements 2024-11-19 11:27:01 -08:00
Lorenze Jay efa8a378a1 None embedder to use default on pipeline cloning 2024-11-19 10:53:09 -08:00
Lorenze Jay e882725b8a updated default embedder 2024-11-19 10:43:06 -08:00
Lorenze Jay cbfdbe3b68 generating cassettes for knowledge test 2024-11-19 10:10:14 -08:00
Lorenze Jay c8bf242633 fix duplicate 2024-11-19 09:59:23 -08:00
Lorenze Jay 70910dd7b4 fix test 2024-11-19 09:41:33 -08:00
Lorenze Jay b104404418 cleanup rm unused embedder 2024-11-18 16:03:48 -08:00
Lorenze Jay d579c5ae12 linted 2024-11-18 13:58:23 -08:00
Lorenze Jay 4831dcb85b Merge branch 'main' of github.com:crewAIInc/crewAI into knowledge 2024-11-18 13:55:32 -08:00
Lorenze Jay cbfcde73ec consolodation and improvements 2024-11-18 13:52:33 -08:00
Lorenze Jay b2c06d5b7a properly reset memory+knowledge 2024-11-18 13:45:43 -08:00
Lorenze Jay 352d05370e properly reset memory 2024-11-18 13:37:16 -08:00
Lorenze Jay b90793874c return this 2024-11-15 15:51:07 -08:00
Lorenze Jay cdf5233523 Merge branch 'main' of github.com:crewAIInc/crewAI into knowledge 2024-11-15 15:42:32 -08:00
Lorenze Jay cb03ee60b8 improvements all around Knowledge class 2024-11-15 15:28:07 -08:00
Lorenze Jay 10f445e18a ensure embeddings are persisted 2024-11-14 18:31:07 -08:00
Lorenze Jay 98a708ca15 Merge branch 'main' of github.com:crewAIInc/crewAI into knowledge 2024-11-14 12:22:07 -08:00
Brandon Hancock 7b59c5b049 adding in lorenze feedback 2024-11-07 12:10:09 -05:00
Brandon Hancock 86ede8344c update yaml to include optional deps 2024-11-07 11:41:49 -05:00
Brandon Hancock 59165cbad8 fix linting 2024-11-07 11:37:06 -05:00
Brandon Hancock 4af263ca1e Merge branch 'main' into knowledge 2024-11-07 11:33:08 -05:00
Brandon Hancock 617ee989cd added additional sources 2024-11-06 16:41:17 -05:00
Brandon Hancock 6131dbac4f Improve types and better support for file paths 2024-11-06 15:57:03 -05:00
Brandon Hancock 1a35114c08 Adding core knowledge sources 2024-11-06 12:33:55 -05:00
Brandon Hancock a8a2f80616 WIP 2024-11-05 12:04:58 -05:00
Brandon Hancock dc314c1151 Merge branch 'main' into knowledge 2024-11-04 15:02:47 -05:00
João Moura 75322b2de1 initial knowledge 2024-11-04 15:53:19 -03:00
40 changed files with 144 additions and 870 deletions

View File

@@ -6,7 +6,7 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Requirements
run: |

View File

@@ -13,10 +13,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: '3.10'
@@ -25,7 +25,7 @@ jobs:
run: echo "::set-output name=hash::$(sha256sum requirements-doc.txt | awk '{print $1}')"
- name: Setup cache
uses: actions/cache@v4
uses: actions/cache@v3
with:
key: mkdocs-material-${{ steps.req-hash.outputs.hash }}
path: .cache
@@ -42,4 +42,4 @@ jobs:
GH_TOKEN: ${{ secrets.GH_TOKEN }}
- name: Build and deploy MkDocs
run: mkdocs gh-deploy --force
run: mkdocs gh-deploy --force

View File

@@ -11,7 +11,7 @@ jobs:
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: "3.11.9"

View File

@@ -14,7 +14,7 @@ jobs:
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: "3.11.9"

View File

@@ -100,7 +100,7 @@ You can now start developing your crew by editing the files in the `src/my_proje
#### Example of a simple crew with a sequential process:
Instantiate your crew:
Instatiate your crew:
```shell
crewai create crew latest-ai-development
@@ -121,7 +121,7 @@ researcher:
You're a seasoned researcher with a knack for uncovering the latest
developments in {topic}. Known for your ability to find the most relevant
information and present it in a clear and concise manner.
reporting_analyst:
role: >
{topic} Reporting Analyst
@@ -205,7 +205,7 @@ class LatestAiDevelopmentCrew():
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
)
)
```
**main.py**
@@ -357,7 +357,7 @@ uv run pytest .
### Running static type checks
```bash
uvx mypy src
uvx mypy
```
### Packaging
@@ -399,7 +399,7 @@ Data collected includes:
- Roles of agents in a crew
- Understand high level use cases so we can build better tools, integrations and examples about it
- Tools names available
- Understand out of the publicly available tools, which ones are being used the most so we can improve them
- Understand out of the publically available tools, which ones are being used the most so we can improve them
Users can opt-in to Further Telemetry, sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews. Enabling `share_crew` results in the collection of detailed crew and task execution data, including `goal`, `backstory`, `context`, and `output` of tasks. This enables a deeper insight into usage patterns while respecting the user's choice to share.
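For illustration, a minimal sketch of opting in; the agent and task definitions here are placeholders, and only the `share_crew` flag is the point:
```python
from crewai import Agent, Crew, Task

# Placeholder agent and task; any crew definition works the same way.
agent = Agent(
    role="Researcher",
    goal="Find the latest AI developments",
    backstory="A diligent analyst.",
)
task = Task(
    description="Summarize recent AI news.",
    expected_output="A short summary.",
    agent=agent,
)

# Opt in to Further Telemetry by enabling `share_crew`, as described above.
crew = Crew(agents=[agent], tasks=[task], share_crew=True)
```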

View File

@@ -1,6 +1,6 @@
---
title: Knowledge
description: Understand what knowledge is in CrewAI and how to effectively use it.
description: What is knowledge in CrewAI and how to use it.
icon: book
---
@@ -8,14 +8,7 @@ icon: book
## Introduction
Knowledge in CrewAI serves as a foundational component for enriching AI agents with contextual and relevant information. It enables agents to access and utilize structured data sources during their execution processes, making them more intelligent and responsive.
The Knowledge class in CrewAI provides a powerful way to manage and query knowledge sources for your AI agents. This guide will show you how to implement knowledge management in your CrewAI projects.
## What is Knowledge?
The `Knowledge` class in CrewAI manages various sources that store information, which can be queried and retrieved by AI agents. This modular approach allows you to integrate diverse data formats such as text, PDFs, spreadsheets, and more into your AI workflows.
Additionally, we have specific tools for generating knowledge sources from strings, text files, PDFs, and spreadsheets. You can expand on any source type by extending the `KnowledgeSource` class.
## Basic Implementation
@@ -32,14 +25,17 @@ string_source = StringKnowledgeSource(
content=content, metadata={"preference": "personal"}
)
# Create an agent with the knowledge store
llm = LLM(model="gpt-4o-mini", temperature=0)
# Create an agent with the knowledge store
agent = Agent(
role="About User",
goal="You know everything about the user.",
backstory="""You are a master at understanding people and their preferences.""",
verbose=True
verbose=True,
allow_delegation=False,
llm=llm,
)
task = Task(
description="Answer the following questions about the user: {question}",
expected_output="An answer to the question.",
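Taken together, a runnable end-to-end sketch of the snippet above might look like the following. The `content` value is hypothetical (the hunk references a `content` variable without showing its definition), and the `knowledge={"sources": ...}` argument on `Crew` is an assumption inferred from the `crew.knowledge.query(...)` call visible in the agent.py diff further down:
```python
from crewai import LLM, Agent, Crew, Process, Task
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Hypothetical content for the `content` variable used above.
content = "Users name is John. He is 30 years old and lives in San Francisco."

string_source = StringKnowledgeSource(
    content=content, metadata={"preference": "personal"}
)

llm = LLM(model="gpt-4o-mini", temperature=0)

agent = Agent(
    role="About User",
    goal="You know everything about the user.",
    backstory="You are a master at understanding people and their preferences.",
    verbose=True,
    allow_delegation=False,
    llm=llm,
)

task = Task(
    description="Answer the following questions about the user: {question}",
    expected_output="An answer to the question.",
    agent=agent,
)

# The `knowledge` argument is an assumption for this version's API;
# agents query `self.crew.knowledge` at execution time per the agent.py diff.
crew = Crew(
    agents=[agent],
    tasks=[task],
    process=Process.sequential,
    knowledge={"sources": [string_source], "metadata": {"preference": "personal"}},
)

result = crew.kickoff(inputs={"question": "What city does John live in?"})
```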

View File

@@ -310,8 +310,8 @@ These are examples of how to configure LLMs for your agent.
from crewai import LLM
llm = LLM(
model="llama-3.1-sonar-large-128k-online",
base_url="https://api.perplexity.ai/",
model="perplexity/mistral-7b-instruct",
base_url="https://api.perplexity.ai/v1",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
@@ -400,4 +400,4 @@ This is particularly useful when working with OpenAI-compatible APIs or when you
- **API Errors**: Check your API key, network connection, and rate limits.
- **Unexpected Outputs**: Refine your prompts and adjust temperature or top_p.
- **Performance Issues**: Consider using a more powerful model or optimizing your queries.
- **Timeout Errors**: Increase the `timeout` parameter or optimize your input.
- **Timeout Errors**: Increase the `timeout` parameter or optimize your input.
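As a sketch of that last point, assuming the `timeout` parameter is passed straight through `LLM` to the underlying provider call (the value here is illustrative):
```python
from crewai import LLM

# Raise the request timeout (in seconds) when long completions
# trip the default limit; the default depends on the provider client.
llm = LLM(
    model="gpt-4o-mini",
    timeout=120,
)
```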

View File

@@ -1,59 +0,0 @@
---
title: Before and After Kickoff Hooks
description: Learn how to use before and after kickoff hooks in CrewAI
---
CrewAI provides hooks that allow you to execute code before and after a crew's kickoff. These hooks are useful for preprocessing inputs or post-processing results.
## Before Kickoff Hook
The before kickoff hook is executed before the crew starts its tasks. It receives the input dictionary and can modify it before passing it to the crew. You can use this hook to set up your environment, load necessary data, or preprocess your inputs. This is useful in scenarios where the input data might need enrichment or validation before being processed by the crew.
Here's an example of defining a before kickoff function in your `crew.py`:
```python
from crewai.project import CrewBase, before_kickoff

@CrewBase
class MyCrew:

    @before_kickoff
    def prepare_data(self, inputs):
        # Preprocess or modify inputs
        inputs['processed'] = True
        return inputs

    # ...
```
In this example, the `prepare_data` function modifies the inputs by adding a new key-value pair indicating that the inputs have been processed.
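A hypothetical invocation (assuming `MyCrew` also defines the usual `@crew` method) shows the hook firing automatically:
```python
# `inputs` passes through prepare_data before the crew ever sees it.
result = MyCrew().crew().kickoff(inputs={"topic": "AI"})
```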
## After Kickoff Hook
The after kickoff hook is executed after the crew has completed its tasks. It receives the result object, which contains the outputs of the crew's execution. This hook is ideal for post-processing results, such as logging, data transformation, or further analysis.
Here's how you can define an after kickoff function in your `crew.py`:
```python
from crewai.project import CrewBase, after_kickoff

@CrewBase
class MyCrew:

    @after_kickoff
    def log_results(self, result):
        # Log or modify the results
        print("Crew execution completed with result:", result)
        return result

    # ...
```
In the `log_results` function, the results of the crew execution are simply printed out. You can extend this to perform more complex operations such as sending notifications or integrating with other services.
## Utilizing Both Hooks
Both hooks can be used together to provide a comprehensive setup and teardown process for your crew's execution. They are particularly useful in maintaining clean code architecture by separating concerns and enhancing the modularity of your CrewAI implementations.
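As a minimal sketch, a single class can declare both hooks side by side (imports as in the examples above):
```python
from crewai.project import CrewBase, before_kickoff, after_kickoff

@CrewBase
class MyCrew:

    @before_kickoff
    def prepare_data(self, inputs):
        inputs['processed'] = True
        return inputs

    @after_kickoff
    def log_results(self, result):
        print("Crew execution completed with result:", result)
        return result

    # ... agents, tasks, and the @crew method go here
```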
## Conclusion
Before and after kickoff hooks in CrewAI offer powerful ways to interact with the lifecycle of a crew's execution. By understanding and utilizing these hooks, you can greatly enhance the robustness and flexibility of your AI agents.

View File

@@ -68,7 +68,6 @@
"concepts/tasks",
"concepts/crews",
"concepts/flows",
"concepts/knowledge",
"concepts/llms",
"concepts/processes",
"concepts/collaboration",

View File

@@ -8,7 +8,7 @@ icon: rocket
Let's create a simple crew that will help us `research` and `report` on the `latest AI developments` for a given topic or subject.
Before we proceed, make sure you have `crewai` and `crewai-tools` installed.
Before we proceed, make sure you have `crewai` and `crewai-tools` installed.
If you haven't installed them yet, you can do so by following the [installation guide](/installation).
Follow the steps below to get crewing! 🚣‍♂️
@@ -23,7 +23,7 @@ Follow the steps below to get crewing! 🚣‍♂️
```
</CodeGroup>
</Step>
<Step title="Modify your `agents.yaml` file">
<Step title="Modify your `agents.yaml` file">
<Tip>
You can also modify the agents as needed to fit your use case or copy and paste as is to your project.
Any variable interpolated in your `agents.yaml` and `tasks.yaml` files like `{topic}` will be replaced by the value of the variable in the `main.py` file.
@@ -39,7 +39,7 @@ Follow the steps below to get crewing! 🚣‍♂️
You're a seasoned researcher with a knack for uncovering the latest
developments in {topic}. Known for your ability to find the most relevant
information and present it in a clear and concise manner.
reporting_analyst:
role: >
{topic} Reporting Analyst
@@ -51,7 +51,7 @@ Follow the steps below to get crewing! 🚣‍♂️
it easy for others to understand and act on the information you provide.
```
</Step>
<Step title="Modify your `tasks.yaml` file">
<Step title="Modify your `tasks.yaml` file">
```yaml tasks.yaml
# src/latest_ai_development/config/tasks.yaml
research_task:
@@ -73,8 +73,8 @@ Follow the steps below to get crewing! 🚣‍♂️
agent: reporting_analyst
output_file: report.md
```
</Step>
<Step title="Modify your `crew.py` file">
</Step>
<Step title="Modify your `crew.py` file">
```python crew.py
# src/latest_ai_development/crew.py
from crewai import Agent, Crew, Process, Task
@@ -121,34 +121,10 @@ Follow the steps below to get crewing! 🚣‍♂️
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
)
)
```
</Step>
<Step title="[Optional] Add before and after crew functions">
```python crew.py
# src/latest_ai_development/crew.py
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff
from crewai_tools import SerperDevTool
@CrewBase
class LatestAiDevelopmentCrew():
"""LatestAiDevelopment crew"""
@before_kickoff
def before_kickoff_function(self, inputs):
print(f"Before kickoff function with inputs: {inputs}")
return inputs # You can return the inputs or modify them as needed
@after_kickoff
def after_kickoff_function(self, result):
print(f"After kickoff function with result: {result}")
return result # You can return the result or modify it as needed
# ... remaining code
```
</Step>
<Step title="Feel free to pass custom inputs to your crew">
<Step title="Feel free to pass custom inputs to your crew">
For example, you can pass the `topic` input to your crew to customize the research and reporting.
```python main.py
#!/usr/bin/env python
@@ -261,14 +237,14 @@ Follow the steps below to get crewing! 🚣‍♂️
### Note on Consistency in Naming
The names you use in your YAML files (`agents.yaml` and `tasks.yaml`) should match the method names in your Python code.
For example, you can reference the agent for specific tasks from `tasks.yaml` file.
For example, you can reference the agent for specific tasks from `tasks.yaml` file.
This naming consistency allows CrewAI to automatically link your configurations with your code; otherwise, your task won't recognize the reference properly.
#### Example References
<Tip>
Note how we use the same name for the agent in the `agents.yaml` (`email_summarizer`) file as the method name in the `crew.py` (`email_summarizer`) file.
</Tip>
</Tip>
```yaml agents.yaml
email_summarizer:
@@ -305,8 +281,6 @@ Use the annotations to properly reference the agent and task in the `crew.py` fi
* `@task`
* `@crew`
* `@tool`
* `@before_kickoff`
* `@after_kickoff`
* `@callback`
* `@output_json`
* `@output_pydantic`
@@ -330,7 +304,7 @@ def email_summarizer_task(self) -> Task:
<Tip>
In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process),
which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
You can learn more about the core concepts [here](/concepts).
</Tip>

View File

@@ -1,6 +1,6 @@
[project]
name = "crewai"
version = "0.83.0"
version = "0.80.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<=3.13"
@@ -9,6 +9,7 @@ authors = [
]
dependencies = [
"pydantic>=2.4.2",
"langchain>=0.2.16",
"openai>=1.13.3",
"opentelemetry-api>=1.22.0",
"opentelemetry-sdk>=1.22.0",
@@ -28,8 +29,6 @@ dependencies = [
"tomli-w>=1.1.0",
"tomli>=2.0.2",
"chromadb>=0.5.18",
"pdfplumber>=0.11.4",
"openpyxl>=3.1.5",
]
[project.urls]

View File

@@ -16,7 +16,7 @@ warnings.filterwarnings(
category=UserWarning,
module="pydantic.main",
)
__version__ = "0.83.0"
__version__ = "0.80.0"
__all__ = [
"Agent",
"Crew",

View File

@@ -11,12 +11,10 @@ from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.cli.constants import ENV_VARS
from crewai.llm import LLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.task import Task
from crewai.tools import BaseTool
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import generate_model_description
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler
@@ -126,7 +124,7 @@ class Agent(BaseAgent):
@model_validator(mode="after")
def post_init_setup(self):
self.agent_ops_agent_name = self.role
unaccepted_attributes = [
unnacepted_attributes = [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_REGION_NAME",
@@ -160,23 +158,28 @@ class Agent(BaseAgent):
for provider, env_vars in ENV_VARS.items():
if provider == set_provider:
for env_var in env_vars:
if env_var["key_name"] in unnacepted_attributes:
continue
# Check if the environment variable is set
key_name = env_var.get("key_name")
if key_name and key_name not in unaccepted_attributes:
env_value = os.environ.get(key_name)
if "key_name" in env_var:
env_value = os.environ.get(env_var["key_name"])
if env_value:
# Map key names containing "API_KEY" to "api_key"
key_name = (
"api_key" if "API_KEY" in key_name else key_name
"api_key"
if "API_KEY" in env_var["key_name"]
else env_var["key_name"]
)
# Map key names containing "API_BASE" to "api_base"
key_name = (
"api_base" if "API_BASE" in key_name else key_name
"api_base"
if "API_BASE" in env_var["key_name"]
else key_name
)
# Map key names containing "API_VERSION" to "api_version"
key_name = (
"api_version"
if "API_VERSION" in key_name
if "API_VERSION" in env_var["key_name"]
else key_name
)
llm_params[key_name] = env_value
@@ -234,7 +237,7 @@ class Agent(BaseAgent):
def execute_task(
self,
task: Task,
task: Any,
context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None,
) -> str:
@@ -253,22 +256,6 @@ class Agent(BaseAgent):
task_prompt = task.prompt()
# If the task requires output in JSON or Pydantic format,
# append specific instructions to the task prompt to ensure
# that the final answer does not include any code block markers
if task.output_json or task.output_pydantic:
# Generate the schema based on the output format
if task.output_json:
# schema = json.dumps(task.output_json, indent=2)
schema = generate_model_description(task.output_json)
elif task.output_pydantic:
schema = generate_model_description(task.output_pydantic)
task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
output_format=schema
)
if context:
task_prompt = self.i18n.slice("task_with_context").format(
task=task_prompt, context=context
@@ -290,8 +277,8 @@ class Agent(BaseAgent):
if self.crew and self.crew.knowledge:
knowledge_snippets = self.crew.knowledge.query([task.prompt()])
valid_snippets = [
result["context"]
for result in knowledge_snippets
result["context"]
for result in knowledge_snippets
if result and result.get("context")
]
if valid_snippets:
@@ -412,7 +399,7 @@ class Agent(BaseAgent):
for tool in tools:
if isinstance(tool, CrewAITool):
tools_list.append(tool.to_structured_tool())
tools_list.append(tool.to_langchain())
else:
tools_list.append(tool)
except ModuleNotFoundError:

View File

@@ -19,7 +19,6 @@ from crewai.agents.agent_builder.utilities.base_token_process import TokenProces
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools import BaseTool
from crewai.tools.base_tool import Tool
from crewai.utilities import I18N, Logger, RPMController
from crewai.utilities.config import process_config
@@ -107,7 +106,7 @@ class BaseAgent(ABC, BaseModel):
default=False,
description="Enable agent to delegate and ask questions among each other.",
)
tools: Optional[List[Any]] = Field(
tools: Optional[List[BaseTool]] = Field(
default_factory=list, description="Tools at agents' disposal"
)
max_iter: Optional[int] = Field(
@@ -136,35 +135,6 @@ class BaseAgent(ABC, BaseModel):
def process_model_config(cls, values):
return process_config(values, cls)
@field_validator("tools")
@classmethod
def validate_tools(cls, tools: List[Any]) -> List[BaseTool]:
"""Validate and process the tools provided to the agent.
This method ensures that each tool is either an instance of BaseTool
or an object with 'name', 'func', and 'description' attributes. If the
tool meets these criteria, it is processed and added to the list of
tools. Otherwise, a ValueError is raised.
"""
processed_tools = []
for tool in tools:
if isinstance(tool, BaseTool):
processed_tools.append(tool)
elif (
hasattr(tool, "name")
and hasattr(tool, "func")
and hasattr(tool, "description")
):
# Tool has the required attributes, create a Tool instance
processed_tools.append(Tool.from_langchain(tool))
else:
raise ValueError(
f"Invalid tool type: {type(tool)}. "
"Tool must be an instance of BaseTool or "
"an object with 'name', 'func', and 'description' attributes."
)
return processed_tools
@model_validator(mode="after")
def validate_and_set_attributes(self):
# Validate required fields

View File

@@ -7,7 +7,6 @@ from rich.console import Console
from .constants import AUTH0_AUDIENCE, AUTH0_CLIENT_ID, AUTH0_DOMAIN
from .utils import TokenManager, validate_token
from crewai.cli.tools.main import ToolCommand
console = Console()
@@ -64,22 +63,7 @@ class AuthenticationCommand:
validate_token(token_data["id_token"])
expires_in = 360000 # Token expiration time in seconds
self.token_manager.save_tokens(token_data["access_token"], expires_in)
try:
ToolCommand().login()
except Exception:
console.print(
"\n[bold yellow]Warning:[/bold yellow] Authentication with the Tool Repository failed.",
style="yellow",
)
console.print(
"Other features will work normally, but you may experience limitations "
"with downloading and publishing tools."
"\nRun [bold]crewai login[/bold] to try logging in again.\n",
style="yellow",
)
console.print("\n[bold green]Welcome to CrewAI Enterprise![/bold green]\n")
console.print("\nWelcome to CrewAI+ !!", style="green")
return
if token_data["error"] not in ("authorization_pending", "slow_down"):

View File

@@ -1,10 +0,0 @@
from .utils import TokenManager

def get_auth_token() -> str:
    """Get the authentication token."""
    access_token = TokenManager().get_token()
    if not access_token:
        raise Exception()
    return access_token

View File

@@ -2,7 +2,7 @@ import requests
from requests.exceptions import JSONDecodeError
from rich.console import Console
from crewai.cli.plus_api import PlusAPI
from crewai.cli.authentication.token import get_auth_token
from crewai.cli.utils import get_auth_token
from crewai.telemetry.telemetry import Telemetry
console = Console()

View File

@@ -1,7 +1,7 @@
from typing import Optional
import requests
from os import getenv
from crewai.cli.version import get_crewai_version
from crewai.cli.utils import get_crewai_version
from urllib.parse import urljoin

View File

@@ -3,8 +3,7 @@ import subprocess
import click
from packaging import version
from crewai.cli.utils import read_toml
from crewai.cli.version import get_crewai_version
from crewai.cli.utils import get_crewai_version, read_toml
def run_crew() -> None:

View File

@@ -1,5 +1,5 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff
from crewai.project import CrewBase, agent, crew, task
# Uncomment the following line to use an example of a custom tool
# from {{folder_name}}.tools.custom_tool import MyCustomTool
@@ -14,18 +14,6 @@ class {{crew_name}}():
agents_config = 'config/agents.yaml'
tasks_config = 'config/tasks.yaml'
@before_kickoff # Optional hook to be executed before the crew starts
def pull_data_example(self, inputs):
# Example of pulling data from an external API, dynamically changing the inputs
inputs['extra_data'] = "This is extra data"
return inputs
@after_kickoff # Optional hook to be executed after the crew has finished
def log_results(self, output):
# Example of logging results, dynamically changing the output
print(f"Results: {output}")
return output
@agent
def researcher(self) -> Agent:
return Agent(

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<=3.13"
dependencies = [
"crewai[tools]>=0.83.0,<1.0.0"
"crewai[tools]>=0.80.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<=3.13"
dependencies = [
"crewai[tools]>=0.83.0,<1.0.0",
"crewai[tools]>=0.80.0,<1.0.0",
]
[project.scripts]

View File

@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]
[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = ">=0.83.0,<1.0.0" }
crewai = { extras = ["tools"], version = ">=0.80.0,<1.0.0" }
asyncio = "*"
[tool.poetry.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"]
requires-python = ">=3.10,<=3.13"
dependencies = [
"crewai[tools]>=0.83.0,<1.0.0"
"crewai[tools]>=0.80.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,6 +5,6 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<=3.13"
dependencies = [
"crewai[tools]>=0.83.0"
"crewai[tools]>=0.80.0"
]

View File

@@ -1,3 +1,4 @@
import importlib.metadata
import os
import shutil
import sys
@@ -8,6 +9,7 @@ import click
import tomli
from rich.console import Console
from crewai.cli.authentication.utils import TokenManager
from crewai.cli.constants import ENV_VARS
if sys.version_info >= (3, 11):
@@ -135,6 +137,11 @@ def _get_nested_value(data: Dict[str, Any], keys: List[str]) -> Any:
return reduce(dict.__getitem__, keys, data)
def get_crewai_version() -> str:
"""Get the version number of CrewAI running the CLI"""
return importlib.metadata.version("crewai")
def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
"""Fetch the environment variables from a .env file and return them as a dictionary."""
try:
@@ -159,6 +166,14 @@ def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
return {}
def get_auth_token() -> str:
"""Get the authentication token."""
access_token = TokenManager().get_token()
if not access_token:
raise Exception()
return access_token
def tree_copy(source, destination):
"""Copies the entire directory structure from the source to the destination."""
for item in os.listdir(source):

View File

@@ -1,6 +0,0 @@
import importlib.metadata

def get_crewai_version() -> str:
    """Get the version number of CrewAI running the CLI"""
    return importlib.metadata.version("crewai")

View File

@@ -1,6 +1,6 @@
import io
import logging
import sys
import threading
import warnings
from contextlib import contextmanager
from typing import Any, Dict, List, Optional, Union
@@ -13,25 +13,16 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
)
class FilteredStream:
def __init__(self, original_stream):
self._original_stream = original_stream
self._lock = threading.Lock()
def write(self, s) -> int:
with self._lock:
if (
"Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new"
in s
or "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True`"
in s
):
return 0
return self._original_stream.write(s)
def flush(self):
with self._lock:
return self._original_stream.flush()
class FilteredStream(io.StringIO):
def write(self, s):
if (
"Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new"
in s
or "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True`"
in s
):
return
super().write(s)
LLM_CONTEXT_WINDOW_SIZES = {
@@ -69,8 +60,8 @@ def suppress_warnings():
# Redirect stdout and stderr
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = FilteredStream(old_stdout)
sys.stderr = FilteredStream(old_stderr)
sys.stdout = FilteredStream()
sys.stderr = FilteredStream()
try:
yield

View File

@@ -20,10 +20,10 @@ from pydantic import (
from pydantic_core import PydanticCustomError
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools.base_tool import BaseTool
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry
from crewai.tools.base_tool import BaseTool
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.i18n import I18N
@@ -208,9 +208,7 @@ class Task(BaseModel):
"""Execute the task asynchronously."""
future: Future[TaskOutput] = Future()
threading.Thread(
daemon=True,
target=self._execute_task_async,
args=(agent, context, tools, future),
target=self._execute_task_async, args=(agent, context, tools, future)
).start()
return future

View File

@@ -1,12 +1,10 @@
from abc import ABC, abstractmethod
from inspect import signature
from typing import Any, Callable, Type, get_args, get_origin
from pydantic import BaseModel, ConfigDict, Field, create_model, validator
from langchain_core.tools import StructuredTool
from pydantic import BaseModel, ConfigDict, Field, validator
from pydantic import BaseModel as PydanticBaseModel
from crewai.tools.structured_tool import CrewStructuredTool
class BaseTool(BaseModel, ABC):
class _ArgsSchemaPlaceholder(PydanticBaseModel):
@@ -65,10 +63,9 @@ class BaseTool(BaseModel, ABC):
) -> Any:
"""Here goes the actual implementation of the tool."""
def to_structured_tool(self) -> CrewStructuredTool:
"""Convert this tool to a CrewStructuredTool instance."""
def to_langchain(self) -> StructuredTool:
self._set_args_schema()
return CrewStructuredTool(
return StructuredTool(
name=self.name,
description=self.description,
args_schema=self.args_schema,
@@ -76,47 +73,17 @@ class BaseTool(BaseModel, ABC):
)
@classmethod
def from_langchain(cls, tool: Any) -> "BaseTool":
"""Create a Tool instance from a CrewStructuredTool.
This method takes a CrewStructuredTool object and converts it into a
Tool instance. It ensures that the provided tool has a callable 'func'
attribute and infers the argument schema if not explicitly provided.
"""
if not hasattr(tool, "func") or not callable(tool.func):
raise ValueError("The provided tool must have a callable 'func' attribute.")
args_schema = getattr(tool, "args_schema", None)
if args_schema is None:
# Infer args_schema from the function signature if not provided
func_signature = signature(tool.func)
annotations = func_signature.parameters
args_fields = {}
for name, param in annotations.items():
if name != "self":
param_annotation = (
param.annotation if param.annotation != param.empty else Any
)
field_info = Field(
default=...,
description="",
)
args_fields[name] = (param_annotation, field_info)
if args_fields:
args_schema = create_model(f"{tool.name}Input", **args_fields)
else:
# Create a default schema with no fields if no parameters are found
args_schema = create_model(
f"{tool.name}Input", __base__=PydanticBaseModel
)
return cls(
name=getattr(tool, "name", "Unnamed Tool"),
description=getattr(tool, "description", ""),
func=tool.func,
args_schema=args_schema,
)
def from_langchain(cls, tool: StructuredTool) -> "BaseTool":
if cls == Tool:
if tool.func is None:
raise ValueError("StructuredTool must have a callable 'func'")
return Tool(
name=tool.name,
description=tool.description,
args_schema=tool.args_schema,
func=tool.func,
)
raise NotImplementedError(f"from_langchain not implemented for {cls.__name__}")
def _set_args_schema(self):
if self.args_schema is None:
@@ -167,70 +134,17 @@ class BaseTool(BaseModel, ABC):
class Tool(BaseTool):
"""The function that will be executed when the tool is called."""
func: Callable
"""The function that will be executed when the tool is called."""
def _run(self, *args: Any, **kwargs: Any) -> Any:
return self.func(*args, **kwargs)
@classmethod
def from_langchain(cls, tool: Any) -> "Tool":
"""Create a Tool instance from a CrewStructuredTool.
This method takes a CrewStructuredTool object and converts it into a
Tool instance. It ensures that the provided tool has a callable 'func'
attribute and infers the argument schema if not explicitly provided.
Args:
tool (Any): The CrewStructuredTool object to be converted.
Returns:
Tool: A new Tool instance created from the provided CrewStructuredTool.
Raises:
ValueError: If the provided tool does not have a callable 'func' attribute.
"""
if not hasattr(tool, "func") or not callable(tool.func):
raise ValueError("The provided tool must have a callable 'func' attribute.")
args_schema = getattr(tool, "args_schema", None)
if args_schema is None:
# Infer args_schema from the function signature if not provided
func_signature = signature(tool.func)
annotations = func_signature.parameters
args_fields = {}
for name, param in annotations.items():
if name != "self":
param_annotation = (
param.annotation if param.annotation != param.empty else Any
)
field_info = Field(
default=...,
description="",
)
args_fields[name] = (param_annotation, field_info)
if args_fields:
args_schema = create_model(f"{tool.name}Input", **args_fields)
else:
# Create a default schema with no fields if no parameters are found
args_schema = create_model(
f"{tool.name}Input", __base__=PydanticBaseModel
)
return cls(
name=getattr(tool, "name", "Unnamed Tool"),
description=getattr(tool, "description", ""),
func=tool.func,
args_schema=args_schema,
)
def to_langchain(
tools: list[BaseTool | CrewStructuredTool],
) -> list[CrewStructuredTool]:
return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools]
tools: list[BaseTool | StructuredTool],
) -> list[StructuredTool]:
return [t.to_langchain() if isinstance(t, BaseTool) else t for t in tools]
def tool(*args):

View File

@@ -1,7 +1,6 @@
from pydantic import BaseModel, Field
from crewai.agents.cache import CacheHandler
from crewai.tools.structured_tool import CrewStructuredTool
class CacheTools(BaseModel):
@@ -14,7 +13,9 @@ class CacheTools(BaseModel):
)
def tool(self):
return CrewStructuredTool.from_function(
from langchain.tools import StructuredTool
return StructuredTool.from_function(
func=self.hit_cache,
name=self.name,
description="Reads directly from the cache",

View File

@@ -1,242 +0,0 @@
from __future__ import annotations
import inspect
import textwrap
from typing import Any, Callable, Optional, Union, get_type_hints
from pydantic import BaseModel, Field, create_model
from crewai.utilities.logger import Logger
class CrewStructuredTool:
    """A structured tool that can operate on any number of inputs.

    This tool intends to replace StructuredTool with a custom implementation
    that integrates better with CrewAI's ecosystem.
    """

    def __init__(
        self,
        name: str,
        description: str,
        args_schema: type[BaseModel],
        func: Callable[..., Any],
    ) -> None:
        """Initialize the structured tool.

        Args:
            name: The name of the tool
            description: A description of what the tool does
            args_schema: The pydantic model for the tool's arguments
            func: The function to run when the tool is called
        """
        self.name = name
        self.description = description
        self.args_schema = args_schema
        self.func = func
        self._logger = Logger()

        # Validate the function signature matches the schema
        self._validate_function_signature()

    @classmethod
    def from_function(
        cls,
        func: Callable,
        name: Optional[str] = None,
        description: Optional[str] = None,
        return_direct: bool = False,
        args_schema: Optional[type[BaseModel]] = None,
        infer_schema: bool = True,
        **kwargs: Any,
    ) -> CrewStructuredTool:
        """Create a tool from a function.

        Args:
            func: The function to create a tool from
            name: The name of the tool. Defaults to the function name
            description: The description of the tool. Defaults to the function docstring
            return_direct: Whether to return the output directly
            args_schema: Optional schema for the function arguments
            infer_schema: Whether to infer the schema from the function signature
            **kwargs: Additional arguments to pass to the tool

        Returns:
            A CrewStructuredTool instance

        Example:
            >>> def add(a: int, b: int) -> int:
            ...     '''Add two numbers'''
            ...     return a + b
            >>> tool = CrewStructuredTool.from_function(add)
        """
        name = name or func.__name__
        description = description or inspect.getdoc(func)

        if description is None:
            raise ValueError(
                f"Function {name} must have a docstring if description not provided."
            )

        # Clean up the description
        description = textwrap.dedent(description).strip()

        if args_schema is not None:
            # Use provided schema
            schema = args_schema
        elif infer_schema:
            # Infer schema from function signature
            schema = cls._create_schema_from_function(name, func)
        else:
            raise ValueError(
                "Either args_schema must be provided or infer_schema must be True."
            )

        return cls(
            name=name,
            description=description,
            args_schema=schema,
            func=func,
        )

    @staticmethod
    def _create_schema_from_function(
        name: str,
        func: Callable,
    ) -> type[BaseModel]:
        """Create a Pydantic schema from a function's signature.

        Args:
            name: The name to use for the schema
            func: The function to create a schema from

        Returns:
            A Pydantic model class
        """
        # Get function signature
        sig = inspect.signature(func)

        # Get type hints
        type_hints = get_type_hints(func)

        # Create field definitions
        fields = {}
        for param_name, param in sig.parameters.items():
            # Skip self/cls for methods
            if param_name in ("self", "cls"):
                continue

            # Get type annotation
            annotation = type_hints.get(param_name, Any)

            # Get default value
            default = ... if param.default == param.empty else param.default

            # Add field
            fields[param_name] = (annotation, Field(default=default))

        # Create model
        schema_name = f"{name.title()}Schema"
        return create_model(schema_name, **fields)

    def _validate_function_signature(self) -> None:
        """Validate that the function signature matches the args schema."""
        sig = inspect.signature(self.func)
        schema_fields = self.args_schema.model_fields

        # Check required parameters
        for param_name, param in sig.parameters.items():
            # Skip self/cls for methods
            if param_name in ("self", "cls"):
                continue

            # Skip **kwargs parameters
            if param.kind in (
                inspect.Parameter.VAR_KEYWORD,
                inspect.Parameter.VAR_POSITIONAL,
            ):
                continue

            # Only validate required parameters without defaults
            if param.default == inspect.Parameter.empty:
                if param_name not in schema_fields:
                    raise ValueError(
                        f"Required function parameter '{param_name}' "
                        f"not found in args_schema"
                    )

    def _parse_args(self, raw_args: Union[str, dict]) -> dict:
        """Parse and validate the input arguments against the schema.

        Args:
            raw_args: The raw arguments to parse, either as a string or dict

        Returns:
            The validated arguments as a dictionary
        """
        if isinstance(raw_args, str):
            try:
                import json

                raw_args = json.loads(raw_args)
            except json.JSONDecodeError as e:
                raise ValueError(f"Failed to parse arguments as JSON: {e}")

        try:
            validated_args = self.args_schema.model_validate(raw_args)
            return validated_args.model_dump()
        except Exception as e:
            raise ValueError(f"Arguments validation failed: {e}")

    async def ainvoke(
        self,
        input: Union[str, dict],
        config: Optional[dict] = None,
        **kwargs: Any,
    ) -> Any:
        """Asynchronously invoke the tool.

        Args:
            input: The input arguments
            config: Optional configuration
            **kwargs: Additional keyword arguments

        Returns:
            The result of the tool execution
        """
        parsed_args = self._parse_args(input)

        if inspect.iscoroutinefunction(self.func):
            return await self.func(**parsed_args, **kwargs)
        else:
            # Run sync functions in a thread pool
            import asyncio

            return await asyncio.get_event_loop().run_in_executor(
                None, lambda: self.func(**parsed_args, **kwargs)
            )

    def _run(self, *args, **kwargs) -> Any:
        """Legacy method for compatibility."""
        # Convert args/kwargs to our expected format
        input_dict = dict(zip(self.args_schema.model_fields.keys(), args))
        input_dict.update(kwargs)
        return self.invoke(input_dict)

    def invoke(
        self, input: Union[str, dict], config: Optional[dict] = None, **kwargs: Any
    ) -> Any:
        """Main method for tool execution."""
        parsed_args = self._parse_args(input)
        return self.func(**parsed_args, **kwargs)

    @property
    def args(self) -> dict:
        """Get the tool's input arguments schema."""
        return self.args_schema.model_json_schema()["properties"]

    def __repr__(self) -> str:
        return (
            f"CrewStructuredTool(name='{self.name}', description='{self.description}')"
        )
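Since the deleted class above is self-contained, a short usage sketch (illustrative function and values) shows the intended flow:
```python
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

# The schema is inferred from the signature by _create_schema_from_function.
tool = CrewStructuredTool.from_function(multiply)
print(tool.args)                        # properties for "a" and "b"
print(tool.invoke({"a": 6, "b": 7}))    # 42
print(tool.invoke('{"a": 2, "b": 3}'))  # string input is parsed as JSON -> 6
```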

View File

@@ -11,7 +11,7 @@
"role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
"tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
"no_tools": "\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
"format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
"task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
@@ -21,8 +21,7 @@
"summarizer_system_message": "You are a helpful assistant that summarizes text.",
"sumamrize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python."
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared."
},
"errors": {
"force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",

View File

@@ -1,6 +1,6 @@
import json
import re
from typing import Any, Optional, Type, Union, get_args, get_origin
from typing import Any, Optional, Type, Union
from pydantic import BaseModel, ValidationError
@@ -214,38 +214,3 @@ def create_converter(
raise Exception("No output converter found or set.")
return converter
def generate_model_description(model: Type[BaseModel]) -> str:
    """
    Generate a string description of a Pydantic model's fields and their types.

    This function takes a Pydantic model class and returns a string that describes
    the model's fields and their respective types. The description includes handling
    of complex types such as `Optional`, `List`, and `Dict`, as well as nested Pydantic
    models.
    """

    def describe_field(field_type):
        origin = get_origin(field_type)
        args = get_args(field_type)
        if origin is Union and type(None) in args:
            non_none_args = [arg for arg in args if arg is not type(None)]
            return f"Optional[{describe_field(non_none_args[0])}]"
        elif origin is list:
            return f"List[{describe_field(args[0])}]"
        elif origin is dict:
            key_type = describe_field(args[0])
            value_type = describe_field(args[1])
            return f"Dict[{key_type}, {value_type}]"
        elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
            return generate_model_description(field_type)
        else:
            return field_type.__name__

    fields = model.__annotations__
    field_descriptions = [
        f'"{name}": {describe_field(type_)}' for name, type_ in fields.items()
    ]
    return "{\n " + ",\n ".join(field_descriptions) + "\n}"

View File

@@ -43,11 +43,10 @@ class TestAuthenticationCommand(unittest.TestCase):
mock_print.assert_any_call("2. Enter the following code: ", "ABCDEF")
mock_open.assert_called_once_with("https://example.com")
@patch("crewai.cli.authentication.main.ToolCommand")
@patch("crewai.cli.authentication.main.requests.post")
@patch("crewai.cli.authentication.main.validate_token")
@patch("crewai.cli.authentication.main.console.print")
def test_poll_for_token_success(self, mock_print, mock_validate_token, mock_post, mock_tool):
def test_poll_for_token_success(self, mock_print, mock_validate_token, mock_post):
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {
@@ -56,13 +55,10 @@ class TestAuthenticationCommand(unittest.TestCase):
}
mock_post.return_value = mock_response
mock_instance = mock_tool.return_value
mock_instance.login.return_value = None
self.auth_command._poll_for_token({"device_code": "123456"})
mock_validate_token.assert_called_once_with("TOKEN")
mock_print.assert_called_once_with("\n[bold green]Welcome to CrewAI Enterprise![/bold green]\n")
mock_print.assert_called_once_with("\nWelcome to CrewAI+ !!", style="green")
@patch("crewai.cli.authentication.main.requests.post")
@patch("crewai.cli.authentication.main.console.print")

View File

@@ -260,6 +260,6 @@ class TestDeployCommand(unittest.TestCase):
self.assertEqual(project_name, "test_project")
def test_get_crewai_version(self):
from crewai.cli.version import get_crewai_version
from crewai.cli.utils import get_crewai_version
assert isinstance(get_crewai_version(), str)

View File

@@ -1,5 +1,4 @@
from typing import Callable
from crewai.tools import BaseTool, tool
@@ -22,7 +21,8 @@ def test_creating_a_tool_using_annotation():
my_tool.func("What is the meaning of life?") == "What is the meaning of life?"
)
converted_tool = my_tool.to_structured_tool()
# Assert the langchain tool conversion worked as expected
converted_tool = my_tool.to_langchain()
assert converted_tool.name == "Name of my tool"
assert (
@@ -41,7 +41,9 @@ def test_creating_a_tool_using_annotation():
def test_creating_a_tool_using_baseclass():
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
def _run(self, question: str) -> str:
return question
@@ -59,7 +61,8 @@ def test_creating_a_tool_using_baseclass():
}
assert my_tool.run("What is the meaning of life?") == "What is the meaning of life?"
converted_tool = my_tool.to_structured_tool()
# Assert the langchain tool conversion worked as expected
converted_tool = my_tool.to_langchain()
assert converted_tool.name == "Name of my tool"
assert (
@@ -70,7 +73,7 @@ def test_creating_a_tool_using_baseclass():
"question": {"title": "Question", "type": "string"}
}
assert (
converted_tool._run("What is the meaning of life?")
converted_tool.run("What is the meaning of life?")
== "What is the meaning of life?"
)
@@ -78,7 +81,9 @@ def test_creating_a_tool_using_baseclass():
def test_setting_cache_function():
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
cache_function: Callable = lambda: False
def _run(self, question: str) -> str:
@@ -92,7 +97,9 @@ def test_setting_cache_function():
def test_default_cache_function_is_true():
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
def _run(self, question: str) -> str:
return question

View File

@@ -1,146 +0,0 @@
from typing import Optional

import pytest
from pydantic import BaseModel, Field

from crewai.tools.structured_tool import CrewStructuredTool


# Test fixtures
@pytest.fixture
def basic_function():
    def test_func(param1: str, param2: int = 0) -> str:
        """Test function with basic params."""
        return f"{param1} {param2}"

    return test_func


@pytest.fixture
def schema_class():
    class TestSchema(BaseModel):
        param1: str
        param2: int = Field(default=0)

    return TestSchema


class TestCrewStructuredTool:
    def test_initialization(self, basic_function, schema_class):
        """Test basic initialization of CrewStructuredTool"""
        tool = CrewStructuredTool(
            name="test_tool",
            description="Test tool description",
            func=basic_function,
            args_schema=schema_class,
        )
        assert tool.name == "test_tool"
        assert tool.description == "Test tool description"
        assert tool.func == basic_function
        assert tool.args_schema == schema_class

    def test_from_function(self, basic_function):
        """Test creating tool from function"""
        tool = CrewStructuredTool.from_function(
            func=basic_function, name="test_tool", description="Test description"
        )
        assert tool.name == "test_tool"
        assert tool.description == "Test description"
        assert tool.func == basic_function
        assert isinstance(tool.args_schema, type(BaseModel))

    def test_validate_function_signature(self, basic_function, schema_class):
        """Test function signature validation"""
        tool = CrewStructuredTool(
            name="test_tool",
            description="Test tool",
            func=basic_function,
            args_schema=schema_class,
        )
        # Should not raise any exceptions
        tool._validate_function_signature()

    @pytest.mark.asyncio
    async def test_ainvoke(self, basic_function):
        """Test asynchronous invocation"""
        tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
        result = await tool.ainvoke(input={"param1": "test"})
        assert result == "test 0"

    def test_parse_args_dict(self, basic_function):
        """Test parsing dictionary arguments"""
        tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
        parsed = tool._parse_args({"param1": "test", "param2": 42})
        assert parsed["param1"] == "test"
        assert parsed["param2"] == 42

    def test_parse_args_string(self, basic_function):
        """Test parsing string arguments"""
        tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
        parsed = tool._parse_args('{"param1": "test", "param2": 42}')
        assert parsed["param1"] == "test"
        assert parsed["param2"] == 42

    def test_complex_types(self):
        """Test handling of complex parameter types"""

        def complex_func(nested: dict, items: list) -> str:
            """Process complex types."""
            return f"Processed {len(items)} items with {len(nested)} nested keys"

        tool = CrewStructuredTool.from_function(
            func=complex_func, name="test_tool", description="Test complex types"
        )
        result = tool.invoke({"nested": {"key": "value"}, "items": [1, 2, 3]})
        assert result == "Processed 3 items with 1 nested keys"

    def test_schema_inheritance(self):
        """Test tool creation with inherited schema"""

        def extended_func(base_param: str, extra_param: int) -> str:
            """Test function with inherited schema."""
            return f"{base_param} {extra_param}"

        class BaseSchema(BaseModel):
            base_param: str

        class ExtendedSchema(BaseSchema):
            extra_param: int

        tool = CrewStructuredTool.from_function(
            func=extended_func, name="test_tool", args_schema=ExtendedSchema
        )
        result = tool.invoke({"base_param": "test", "extra_param": 42})
        assert result == "test 42"

    def test_default_values_in_schema(self):
        """Test handling of default values in schema"""

        def default_func(
            required_param: str,
            optional_param: str = "default",
            nullable_param: Optional[int] = None,
        ) -> str:
            """Test function with default values."""
            return f"{required_param} {optional_param} {nullable_param}"

        tool = CrewStructuredTool.from_function(
            func=default_func, name="test_tool", description="Test defaults"
        )

        # Test with minimal parameters
        result = tool.invoke({"required_param": "test"})
        assert result == "test default None"

        # Test with all parameters
        result = tool.invoke(
            {"required_param": "test", "optional_param": "custom", "nullable_param": 42}
        )
        assert result == "test custom 42"

View File

@@ -1,10 +1,7 @@
import json
from typing import Dict, List, Optional
from unittest.mock import MagicMock, Mock, patch
import pytest
from pydantic import BaseModel
from crewai.llm import LLM
from crewai.utilities.converter import (
Converter,
@@ -12,11 +9,12 @@ from crewai.utilities.converter import (
convert_to_model,
convert_with_instructions,
create_converter,
generate_model_description,
get_conversion_instructions,
handle_partial_json,
validate_model,
)
from pydantic import BaseModel
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
@@ -271,45 +269,3 @@ def test_create_converter_fails_without_agent_or_converter_cls():
create_converter(
llm=Mock(), text="Sample", model=SimpleModel, instructions="Convert"
)
def test_generate_model_description_simple_model():
description = generate_model_description(SimpleModel)
expected_description = '{\n "name": str,\n "age": int\n}'
assert description == expected_description
def test_generate_model_description_nested_model():
description = generate_model_description(NestedModel)
expected_description = (
'{\n "id": int,\n "data": {\n "name": str,\n "age": int\n}\n}'
)
assert description == expected_description
def test_generate_model_description_optional_field():
class ModelWithOptionalField(BaseModel):
name: Optional[str]
age: int
description = generate_model_description(ModelWithOptionalField)
expected_description = '{\n "name": Optional[str],\n "age": int\n}'
assert description == expected_description
def test_generate_model_description_list_field():
class ModelWithListField(BaseModel):
items: List[int]
description = generate_model_description(ModelWithListField)
expected_description = '{\n "items": List[int]\n}'
assert description == expected_description
def test_generate_model_description_dict_field():
class ModelWithDictField(BaseModel):
attributes: Dict[str, int]
description = generate_model_description(ModelWithDictField)
expected_description = '{\n "attributes": Dict[str, int]\n}'
assert description == expected_description

uv.lock (generated)
View File

@@ -608,7 +608,7 @@ wheels = [
[[package]]
name = "crewai"
version = "0.83.0"
version = "0.80.0"
source = { editable = "." }
dependencies = [
{ name = "appdirs" },
@@ -619,13 +619,12 @@ dependencies = [
{ name = "instructor" },
{ name = "json-repair" },
{ name = "jsonref" },
{ name = "langchain" },
{ name = "litellm" },
{ name = "openai" },
{ name = "openpyxl" },
{ name = "opentelemetry-api" },
{ name = "opentelemetry-exporter-otlp-proto-http" },
{ name = "opentelemetry-sdk" },
{ name = "pdfplumber" },
{ name = "pydantic" },
{ name = "python-dotenv" },
{ name = "pyvis" },
@@ -642,9 +641,6 @@ agentops = [
fastembed = [
{ name = "fastembed" },
]
mem0 = [
{ name = "mem0ai" },
]
openpyxl = [
{ name = "openpyxl" },
]
@@ -654,6 +650,9 @@ pandas = [
pdfplumber = [
{ name = "pdfplumber" },
]
mem0 = [
{ name = "mem0ai" },
]
tools = [
{ name = "crewai-tools" },
]
@@ -691,16 +690,15 @@ requires-dist = [
{ name = "instructor", specifier = ">=1.3.3" },
{ name = "json-repair", specifier = ">=0.25.2" },
{ name = "jsonref", specifier = ">=1.1.0" },
{ name = "langchain", specifier = ">=0.2.16" },
{ name = "litellm", specifier = ">=1.44.22" },
{ name = "mem0ai", marker = "extra == 'mem0'", specifier = ">=0.1.29" },
{ name = "openai", specifier = ">=1.13.3" },
{ name = "openpyxl", specifier = ">=3.1.5" },
{ name = "openpyxl", marker = "extra == 'openpyxl'", specifier = ">=3.1.5" },
{ name = "opentelemetry-api", specifier = ">=1.22.0" },
{ name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.22.0" },
{ name = "opentelemetry-sdk", specifier = ">=1.22.0" },
{ name = "pandas", marker = "extra == 'pandas'", specifier = ">=2.2.3" },
{ name = "pdfplumber", specifier = ">=0.11.4" },
{ name = "pdfplumber", marker = "extra == 'pdfplumber'", specifier = ">=0.11.4" },
{ name = "pydantic", specifier = ">=2.4.2" },
{ name = "python-dotenv", specifier = ">=1.0.0" },
@@ -954,6 +952,7 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c1/8b/5fe2cc11fee489817272089c4203e679c63b570a5aaeb18d852ae3cbba6a/et_xmlfile-2.0.0-py3-none-any.whl", hash = "sha256:7a91720bc756843502c3b7504c77b8fe44217c85c537d85037f0f536151b2caa", size = 18059 },
]
[[package]]
name = "exceptiongroup"
version = "1.2.2"