Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-08 07:38:29 +00:00

Compare commits (7 commits): devin/1742...devin/1742
| Author | SHA1 | Date |
|---|---|---|
|  | 0f121ba5f2 |  |
|  | 1eb6f3b470 |  |
|  | 82fad3f935 |  |
|  | 21a91f9998 |  |
|  | fe0813e831 |  |
|  | 33cebea15b |  |
|  | e723e5ca3f |  |
@@ -152,6 +152,12 @@ If you encounter issues during installation or usage, here are some common solutions:
    - Try upgrading pip: `pip install --upgrade pip`
    - If issues persist, use a pre-built wheel: `pip install tiktoken --prefer-binary`
 
+3. **ModuleNotFoundError: No module named 'packaging.licenses'**
+   - This error occurs when installing with `uv` (v0.1.0 and above) due to newer `setuptools` versions
+   - Fix by downgrading setuptools: `pip install "setuptools<=65.5.0"` (quoted so the shell does not treat `<=` as a redirect)
+   - Then install CrewAI: `uv pip install crewai`
+   - Note: This is a temporary workaround until compatibility with newer `setuptools` releases is resolved
+
 ### 2. Setting Up Your Crew with the YAML Configuration
 
 To create a new CrewAI project, run the following CLI (Command Line Interface) command:
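A quick way to check a given environment for the error described above is to probe for the `packaging.licenses` module directly. A minimal sketch, not part of this diff (`packaging.licenses` is the module named in the traceback; it only exists in recent releases of `packaging`):

```python
# Hedged sketch: probe whether packaging.licenses is importable here.
import importlib.util

try:
    spec = importlib.util.find_spec("packaging.licenses")
except ModuleNotFoundError:
    spec = None  # the packaging distribution itself is absent
print("packaging.licenses importable:", spec is not None)
```

If this prints `False` while a build backend tries to import the module, the setuptools downgrade above is the applicable workaround.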
@@ -115,6 +115,7 @@
             "concepts/testing",
             "concepts/cli",
             "concepts/tools",
+            "concepts/event-listener",
             "concepts/langchain-tools",
             "concepts/llamaindex-tools"
           ]
@@ -1,6 +1,6 @@
 [project]
 name = "crewai"
-version = "0.105.0"
+version = "0.108.0"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 readme = "README.md"
 requires-python = ">=3.10,<3.13"
@@ -96,5 +96,6 @@ exclude = ["cli/templates"]
 exclude_dirs = ["src/crewai/cli/templates"]
 
 [build-system]
-requires = ["hatchling"]
+# Pin setuptools version to avoid packaging.licenses dependency issues with UV package manager
+requires = ["hatchling", "setuptools>=64.0.0,<=65.5.0"]  # Explicit version range for compatibility
 build-backend = "hatchling.build"
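To verify that an environment actually satisfies the pinned range, the `packaging` library can evaluate the same specifier string. A small sketch, assuming `setuptools` and `packaging` are installed; the range is copied from the hunk above:

```python
# Hedged sketch: check the installed setuptools against the pinned range.
from importlib.metadata import version
from packaging.specifiers import SpecifierSet

pinned = SpecifierSet(">=64.0.0,<=65.5.0")  # range from the [build-system] pin
installed = version("setuptools")
print(f"setuptools {installed} satisfies pin: {installed in pinned}")
```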
@@ -14,7 +14,7 @@ warnings.filterwarnings(
     category=UserWarning,
     module="pydantic.main",
 )
-__version__ = "0.105.0"
+__version__ = "0.108.0"
 __all__ = [
     "Agent",
     "Crew",
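After upgrading, the bumped attribute can be read back directly to confirm which release is active (a trivial sketch; `__version__` is the attribute changed in the hunk above):

```python
# Hedged sketch: confirm the active CrewAI release after the bump.
import crewai

print(crewai.__version__)  # expected: "0.108.0" at this commit
```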
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.105.0,<1.0.0"
+    "crewai[tools]>=0.108.0,<1.0.0"
 ]
 
 [project.scripts]
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.105.0,<1.0.0",
+    "crewai[tools]>=0.108.0,<1.0.0",
 ]
 
 [project.scripts]
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
 readme = "README.md"
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.105.0"
+    "crewai[tools]>=0.108.0"
 ]
 
 [tool.crewai]
@@ -1,11 +1,7 @@
 import json
-import logging
 import warnings
 from abc import ABC, abstractmethod
 from inspect import signature
-from typing import Any, Callable, Dict, Optional, Type, Union, get_args, get_origin
-
-logger = logging.getLogger(__name__)
+from typing import Any, Callable, Type, get_args, get_origin
 
 from pydantic import (
     BaseModel,
@@ -79,93 +75,6 @@ class BaseTool(BaseModel, ABC):
         **kwargs: Any,
     ) -> Any:
         """Here goes the actual implementation of the tool."""
 
-    def invoke(
-        self, input: Union[str, dict], config: Optional[dict] = None, **kwargs: Any
-    ) -> Any:
-        """Main method for tool execution.
-
-        This method provides a fallback implementation for models that don't support
-        function calling natively (like QwQ-32B-Preview and deepseek-chat).
-        It parses the input and calls the _run method with the appropriate arguments.
-
-        Args:
-            input: Either a string (raw or JSON) or a dictionary of arguments
-            config: Optional configuration dictionary
-            **kwargs: Additional keyword arguments to pass to _run
-
-        Returns:
-            The result of calling the tool's _run method
-
-        Raises:
-            ValueError: If input is neither a string nor a dictionary
-            ValueError: If input exceeds the maximum allowed size
-            ValueError: If input contains nested dictionaries beyond the maximum allowed depth
-        """
-        # Input type validation
-        if not isinstance(input, (str, dict)):
-            raise ValueError(f"Input must be string or dict, got {type(input)}")
-
-        # Input size validation (limit to 100KB)
-        MAX_INPUT_SIZE = 100 * 1024  # 100KB
-        if isinstance(input, str) and len(input.encode('utf-8')) > MAX_INPUT_SIZE:
-            logger.warning(f"Input string exceeds maximum size of {MAX_INPUT_SIZE} bytes")
-            raise ValueError(f"Input string exceeds maximum size of {MAX_INPUT_SIZE} bytes")
-
-        if isinstance(input, str):
-            # Try to parse as JSON if it's a string
-            try:
-                input = json.loads(input)
-                logger.debug(f"Successfully parsed JSON input: {input}")
-            except json.JSONDecodeError as e:
-                # If not valid JSON, pass as a single argument
-                logger.debug(f"Input string is not JSON format: {e}")
-                return self._run(input)
-
-        if not isinstance(input, dict):
-            # If input is not a dict after parsing, pass it directly
-            logger.debug(f"Using non-dict input directly: {input}")
-            return self._run(input)
-
-        # Validate nested dictionary depth
-        MAX_DEPTH = 5
-        def check_depth(obj, current_depth=1):
-            if current_depth > MAX_DEPTH:
-                return False
-            if isinstance(obj, dict):
-                return all(check_depth(v, current_depth + 1) for v in obj.values())
-            elif isinstance(obj, (list, tuple)):
-                return all(check_depth(item, current_depth + 1) for item in obj)
-            return True
-
-        if not check_depth(input):
-            logger.warning(f"Input contains nested structures beyond maximum depth of {MAX_DEPTH}")
-            raise ValueError(f"Input contains nested structures beyond maximum depth of {MAX_DEPTH}")
-
-        # Get the expected arguments from the schema
-        if hasattr(self, 'args_schema') and self.args_schema is not None:
-            try:
-                # Extract argument names from the schema
-                arg_names = list(self.args_schema.model_json_schema()["properties"].keys())
-
-                # Filter the input to only include valid arguments
-                filtered_args = {}
-                for k in input.keys():
-                    if k in arg_names:
-                        filtered_args[k] = input[k]
-                    else:
-                        logger.warning(f"Ignoring unexpected argument: {k}")
-
-                logger.debug(f"Calling _run with filtered arguments: {filtered_args}")
-                # Call _run with the filtered arguments
-                return self._run(**filtered_args)
-            except Exception as e:
-                # Fallback to passing the entire input dict if schema parsing fails
-                logger.warning(f"Schema parsing failed, using raw input: {e}")
-
-        # If we couldn't parse the schema or there was an error, just pass the input dict
-        logger.debug(f"Calling _run with unfiltered arguments: {input}")
-        return self._run(**input)
-
     def to_structured_tool(self) -> CrewStructuredTool:
         """Convert this tool to a CrewStructuredTool instance."""
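The removed docstring describes a three-way dispatch: dict inputs pass straight through, JSON strings are decoded into keyword arguments, and non-JSON strings are forwarded as a single positional argument. A standalone sketch of that parsing step (the helper name `parse_tool_input` is hypothetical, not CrewAI API):

```python
# Hedged sketch of the input dispatch the removed invoke() performed.
import json
from typing import Any, Union

def parse_tool_input(raw: Union[str, dict]) -> Any:
    """Dicts pass through; JSON strings decode; other strings stay raw."""
    if isinstance(raw, dict):
        return raw  # already keyword arguments for _run(**raw)
    try:
        return json.loads(raw)  # e.g. '{"param": "x"}' -> {"param": "x"}
    except json.JSONDecodeError:
        return raw  # invoke() called self._run(raw) with the raw string

print(parse_tool_input('{"param": "x"}'))  # {'param': 'x'}
print(parse_tool_input("plain text"))      # plain text
```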
@@ -1,6 +1,6 @@
 from typing import Any, Dict, Optional, Union
 
-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict
 
 from .base_events import CrewEvent
 
@@ -52,9 +52,11 @@ class MethodExecutionFailedEvent(FlowEvent):
 
     flow_name: str
     method_name: str
-    error: Any
+    error: Exception
     type: str = "method_execution_failed"
 
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
 
 class FlowFinishedEvent(FlowEvent):
     """Event emitted when a flow completes execution"""
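The added `model_config` is what makes the `error: Exception` annotation legal: Pydantic v2 will not generate a schema for arbitrary classes like `Exception` unless `arbitrary_types_allowed` is set, in which case the field is validated with an `isinstance()` check. A minimal sketch (the demo class name is illustrative, not the real event class):

```python
# Hedged sketch: an Exception-typed field requires arbitrary_types_allowed;
# without it, Pydantic v2 errors at class-definition time.
from pydantic import BaseModel, ConfigDict

class FailedEventDemo(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)

    flow_name: str
    error: Exception  # accepted via isinstance() under this config

event = FailedEventDemo(flow_name="demo_flow", error=ValueError("boom"))
print(type(event.error).__name__)  # ValueError
```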
tests/installation/test_dependency_compatibility.py (new file, 80 lines)
@@ -0,0 +1,80 @@
+"""
+Test module for verifying dependency compatibility with different package managers.
+These tests ensure that critical dependencies can be installed without conflicts.
+"""
+
+import contextlib
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+import pytest
+
+
+@contextlib.contextmanager
+def temporary_package_environment():
+    """Create an isolated environment for package testing.
+
+    This context manager creates a temporary directory where package installations
+    can be tested in isolation, then cleans up afterward.
+    """
+    temp_dir = tempfile.mkdtemp()
+    old_cwd = os.getcwd()
+    try:
+        os.chdir(temp_dir)
+        yield temp_dir
+    finally:
+        os.chdir(old_cwd)
+        shutil.rmtree(temp_dir, ignore_errors=True)
+
+
+def test_pypika_installation():
+    """Test that pypika can be installed without packaging.licenses errors.
+
+    This test verifies that pypika 0.48.9 (a dependency of chromadb, which is a
+    dependency of CrewAI) can be installed without errors related to the
+    packaging.licenses module when using the UV package manager.
+    """
+    # Check if uv is available
+    uv_path = shutil.which("uv")
+    if not uv_path:
+        pytest.skip("UV package manager not available, skipping test")
+
+    # Use isolated environment for testing
+    with temporary_package_environment():
+        # Install pypika using uv
+        result = subprocess.run(
+            ["uv", "pip", "install", "pypika==0.48.9", "--no-deps"],
+            capture_output=True,
+            text=True,
+        )
+        assert result.returncode == 0, f"Failed to install pypika: {result.stderr}\nCommand output: {result.stdout}"
+
+
+def test_chromadb_installation():
+    """Test that chromadb can be installed without packaging.licenses errors.
+
+    This test verifies that chromadb (a direct dependency of CrewAI) can be
+    installed without errors related to the packaging.licenses module when
+    using the UV package manager.
+    """
+    # Skip this test if running in CI/CD to avoid long test times
+    if "CI" in os.environ:
+        pytest.skip("Skipping in CI environment to reduce test time")
+
+    # Check if uv is available
+    uv_path = shutil.which("uv")
+    if not uv_path:
+        pytest.skip("UV package manager not available, skipping test")
+
+    # Use isolated environment for testing
+    with temporary_package_environment():
+        # Install chromadb using uv
+        result = subprocess.run(
+            ["uv", "pip", "install", "chromadb>=0.5.23", "--no-deps"],
+            capture_output=True,
+            text=True,
+        )
+        assert result.returncode == 0, f"Failed to install chromadb: {result.stderr}\nCommand output: {result.stdout}"
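The `temporary_package_environment` helper pairs `mkdtemp` with a manual `rmtree`; the standard library's `tempfile.TemporaryDirectory` gives the same isolation with automatic cleanup. A sketch of the equivalent pattern (the helper name `scratch_cwd` is hypothetical):

```python
# Hedged sketch: same isolation as temporary_package_environment above,
# using TemporaryDirectory so cleanup happens automatically.
import contextlib
import os
import tempfile

@contextlib.contextmanager
def scratch_cwd():
    """Chdir into a throwaway directory; restore cwd and delete it on exit."""
    old_cwd = os.getcwd()
    with tempfile.TemporaryDirectory() as temp_dir:
        try:
            os.chdir(temp_dir)
            yield temp_dir
        finally:
            os.chdir(old_cwd)

with scratch_cwd() as workdir:
    print("installing into:", workdir)
```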
@@ -1,55 +0,0 @@
-from typing import Type
-
-import pytest
-from pydantic import BaseModel, Field
-
-from crewai.tools import BaseTool
-
-
-class TestToolInput(BaseModel):
-    param: str = Field(description="A test parameter")
-
-
-class TestTool(BaseTool):
-    name: str = "Test Tool"
-    description: str = "A tool for testing the invoke method"
-    args_schema: Type[BaseModel] = TestToolInput
-
-    def _run(self, param: str) -> str:
-        return f"Tool executed with: {param}"
-
-
-def test_invoke_with_dict():
-    """Test that invoke works with a dictionary input."""
-    tool = TestTool()
-    result = tool.invoke(input={"param": "test value"})
-    assert result == "Tool executed with: test value"
-
-
-def test_invoke_with_json_string():
-    """Test that invoke works with a JSON string input."""
-    tool = TestTool()
-    result = tool.invoke(input='{"param": "test value"}')
-    assert result == "Tool executed with: test value"
-
-
-def test_invoke_with_raw_string():
-    """Test that invoke works with a raw string input."""
-    tool = TestTool()
-    result = tool.invoke(input="test value")
-    assert result == "Tool executed with: test value"
-
-
-def test_invoke_with_empty_dict():
-    """Test that invoke handles empty dict input appropriately."""
-    tool = TestTool()
-    with pytest.raises(Exception):
-        # Should raise an exception since param is required
-        tool.invoke(input={})
-
-
-def test_invoke_with_extra_args():
-    """Test that invoke filters out extra arguments not in the schema."""
-    tool = TestTool()
-    result = tool.invoke(input={"param": "test value", "extra": "ignored"})
-    assert result == "Tool executed with: test value"
@@ -1,69 +0,0 @@
-from typing import Type
-
-import pytest
-from pydantic import BaseModel, Field
-
-from crewai.tools import BaseTool
-
-
-class TestToolInput(BaseModel):
-    param: str = Field(description="A test parameter")
-
-
-class TestTool(BaseTool):
-    name: str = "Test Tool"
-    description: str = "A tool for testing the invoke method"
-    args_schema: Type[BaseModel] = TestToolInput
-
-    def _run(self, param: str) -> str:
-        return f"Tool executed with: {param}"
-
-
-def test_invoke_with_invalid_type():
-    """Test that invoke raises ValueError with invalid input types."""
-    tool = TestTool()
-    with pytest.raises(ValueError, match="Input must be string or dict"):
-        tool.invoke(input=123)
-
-    with pytest.raises(ValueError, match="Input must be string or dict"):
-        tool.invoke(input=["list", "not", "allowed"])
-
-    with pytest.raises(ValueError, match="Input must be string or dict"):
-        tool.invoke(input=None)
-
-
-def test_invoke_with_config():
-    """Test that invoke properly handles configuration dictionaries."""
-    tool = TestTool()
-    # Config should be passed through to _run but not affect the result
-    result = tool.invoke(input={"param": "test with config"}, config={"timeout": 30})
-    assert result == "Tool executed with: test with config"
-
-
-def test_invoke_with_malformed_json():
-    """Test that invoke handles malformed JSON gracefully."""
-    tool = TestTool()
-    # Malformed JSON should be treated as a raw string
-    result = tool.invoke(input="{param: this is not valid JSON}")
-    assert "this is not valid JSON" in result
-
-
-def test_invoke_with_nested_dict():
-    """Test that invoke handles nested dictionaries properly."""
-    class NestedToolInput(BaseModel):
-        config: dict = Field(description="A nested configuration dictionary")
-
-    class NestedTool(BaseTool):
-        name: str = "Nested Tool"
-        description: str = "A tool for testing nested dictionaries"
-        args_schema: Type[BaseModel] = NestedToolInput
-
-        def _run(self, config: dict) -> str:
-            return f"Tool executed with nested config: {config}"
-
-    tool = NestedTool()
-    nested_input = {"config": {"key1": "value1", "key2": {"nested": "value"}}}
-    result = tool.invoke(input=nested_input)
-    assert "Tool executed with nested config" in result
-    assert "key1" in result
-    assert "nested" in result