Compare commits


8 Commits

Author SHA1 Message Date
Lucas Gomide
5a3b94c84b feat: upgrade fastavro, pyarrow and lancedb 2025-06-02 18:20:41 -03:00
Lucas Gomide
5307055ae6 build: attempt to build PyTorch on Python 3.13 2025-06-02 18:05:02 -03:00
Lucas Gomide
41925a7728 build: drop fastembed, it is no longer used 2025-06-02 17:27:35 -03:00
Lucas Gomide
6ebfb57f9e build: explicit tokenizers dependency
Added an explicit tokenizers>=0.20.3 dependency to ensure a version compatible with Python 3.13 is used.
2025-06-02 17:26:53 -03:00
Lucas Gomide
db316e55b2 build: add requires-python <3.14 2025-06-02 17:21:34 -03:00
Lucas Gomide
4a7b5ef93f docs: update docs about supported Python versions 2025-06-02 17:20:53 -03:00
Lucas Gomide
7d15b29df8 ci: support python 3.13 on CI 2025-06-02 17:20:31 -03:00
VirenG
c045399d6b Update README.md (#2923)
Added the 'Multi-AI Agent' phrase to clarify the Key Features section (clause 3) of README.md
2025-05-31 21:39:42 -07:00
12 changed files with 890 additions and 1122 deletions

View File

@@ -14,7 +14,7 @@ jobs:
     timeout-minutes: 15
     strategy:
       matrix:
-        python-version: ['3.10', '3.11', '3.12']
+        python-version: ['3.10', '3.11', '3.12', '3.13']
     steps:
       - name: Checkout code
         uses: actions/checkout@v4

View File

@@ -403,7 +403,7 @@ In addition to the sequential process, you can use the hierarchical process, whi
 ## Key Features
 
-CrewAI stands apart as a lean, standalone, high-performance framework delivering simplicity, flexibility, and precise control—free from the complexity and limitations found in other agent frameworks.
+CrewAI stands apart as a lean, standalone, high-performance multi-AI Agent framework delivering simplicity, flexibility, and precise control—free from the complexity and limitations found in other agent frameworks.
 
 - **Standalone & Lean**: Completely independent from other frameworks like LangChain, offering faster execution and lighter resource demands.
 - **Flexible & Precise**: Easily orchestrate autonomous agents through intuitive [Crews](https://docs.crewai.com/concepts/crews) or precise [Flows](https://docs.crewai.com/concepts/flows), achieving perfect balance for your needs.

View File

@@ -22,7 +22,7 @@ Watch this video tutorial for a step-by-step demonstration of the installation p
 <Note>
 **Python Version Requirements**
 
-CrewAI requires `Python >=3.10 and <3.13`. Here's how to check your version:
+CrewAI requires `Python >=3.10 and <=3.13`. Here's how to check your version:
 ```bash
 python3 --version
 ```
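
Not part of the diff, but for reference: the same check can be done from Python itself. A minimal sketch based on the version range documented above (>=3.10, up to 3.13.x):

```python
# Check that the running interpreter falls inside the documented range.
import sys

if not ((3, 10) <= sys.version_info[:2] <= (3, 13)):
    raise SystemExit(
        f"Python {sys.version.split()[0]} is outside the supported range 3.10-3.13"
    )
print(f"Python {sys.version.split()[0]} is supported")
```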

View File

@@ -3,7 +3,7 @@ name = "crewai"
version = "0.121.1"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<3.13"
requires-python = ">=3.10,<3.14"
authors = [
{ name = "Joao Moura", email = "joao@crewai.com" }
]
@@ -22,6 +22,8 @@ dependencies = [
"opentelemetry-exporter-otlp-proto-http>=1.30.0",
# Data Handling
"chromadb>=0.5.23",
"tokenizers>=0.20.3",
"onnxruntime==1.22.0",
"openpyxl>=3.1.5",
"pyvis>=0.3.2",
# Authentication and Security
@@ -50,7 +52,6 @@ embeddings = [
"tiktoken~=0.7.0"
]
agentops = ["agentops>=0.3.0"]
fastembed = ["fastembed>=0.4.1"]
pdfplumber = [
"pdfplumber>=0.11.4",
]
@@ -100,6 +101,27 @@ exclude = ["cli/templates"]
 [tool.bandit]
 exclude_dirs = ["src/crewai/cli/templates"]
 
+# PyTorch index configuration, since torch 2.5.0 is not compatible with python 3.13
+[[tool.uv.index]]
+name = "pytorch-nightly"
+url = "https://download.pytorch.org/whl/nightly/cpu"
+explicit = true
+
+[[tool.uv.index]]
+name = "pytorch"
+url = "https://download.pytorch.org/whl/cpu"
+explicit = true
+
+[tool.uv.sources]
+torch = [
+    { index = "pytorch-nightly", marker = "python_version >= '3.13'" },
+    { index = "pytorch", marker = "python_version < '3.13'" },
+]
+torchvision = [
+    { index = "pytorch-nightly", marker = "python_version >= '3.13'" },
+    { index = "pytorch", marker = "python_version < '3.13'" },
+]
+
 [build-system]
 requires = ["hatchling"]
 build-backend = "hatchling.build"
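
Not part of the diff, but a quick sanity check of this index configuration: after `uv sync`, the interpreter version determines which index torch is resolved from (nightly CPU wheels on 3.13, the stable CPU index otherwise). A minimal sketch, assuming torch and tokenizers are installed in the active environment:

```python
# Print the resolved versions to confirm which PyTorch build and tokenizers
# release were installed for the current interpreter.
import sys

import tokenizers
import torch

print(f"Python     : {sys.version.split()[0]}")
print(f"torch      : {torch.__version__}")       # a dev/nightly string is expected on 3.13
print(f"tokenizers : {tokenizers.__version__}")  # should satisfy >=0.20.3
```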

View File

@@ -1,93 +0,0 @@
-from pathlib import Path
-from typing import List, Optional, Union
-
-import numpy as np
-
-from .base_embedder import BaseEmbedder
-
-try:
-    from fastembed_gpu import TextEmbedding  # type: ignore
-
-    FASTEMBED_AVAILABLE = True
-except ImportError:
-    try:
-        from fastembed import TextEmbedding
-
-        FASTEMBED_AVAILABLE = True
-    except ImportError:
-        FASTEMBED_AVAILABLE = False
-
-
-class FastEmbed(BaseEmbedder):
-    """
-    A wrapper class for text embedding models using FastEmbed
-    """
-
-    def __init__(
-        self,
-        model_name: str = "BAAI/bge-small-en-v1.5",
-        cache_dir: Optional[Union[str, Path]] = None,
-    ):
-        """
-        Initialize the embedding model
-
-        Args:
-            model_name: Name of the model to use
-            cache_dir: Directory to cache the model
-            gpu: Whether to use GPU acceleration
-        """
-        if not FASTEMBED_AVAILABLE:
-            raise ImportError(
-                "FastEmbed is not installed. Please install it with: "
-                "uv pip install fastembed or uv pip install fastembed-gpu for GPU support"
-            )
-
-        self.model = TextEmbedding(
-            model_name=model_name,
-            cache_dir=str(cache_dir) if cache_dir else None,
-        )
-
-    def embed_chunks(self, chunks: List[str]) -> List[np.ndarray]:
-        """
-        Generate embeddings for a list of text chunks
-
-        Args:
-            chunks: List of text chunks to embed
-
-        Returns:
-            List of embeddings
-        """
-        embeddings = list(self.model.embed(chunks))
-        return embeddings
-
-    def embed_texts(self, texts: List[str]) -> List[np.ndarray]:
-        """
-        Generate embeddings for a list of texts
-
-        Args:
-            texts: List of texts to embed
-
-        Returns:
-            List of embeddings
-        """
-        embeddings = list(self.model.embed(texts))
-        return embeddings
-
-    def embed_text(self, text: str) -> np.ndarray:
-        """
-        Generate embedding for a single text
-
-        Args:
-            text: Text to embed
-
-        Returns:
-            Embedding array
-        """
-        return self.embed_texts([text])[0]
-
-    @property
-    def dimension(self) -> int:
-        """Get the dimension of the embeddings"""
-        # Generate a test embedding to get dimensions
-        test_embed = self.embed_text("test")
-        return len(test_embed)

View File

@@ -68,7 +68,7 @@ def to_serializable(
                 _current_depth=_current_depth + 1,
             )
     else:
-        return str(obj)
+        return repr(obj)
 
 
 def _to_serializable_key(key: Any) -> str:
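
For context (not part of the diff): the fallback branch now uses `repr` instead of `str`, which keeps quoting and type information for values that cannot be decomposed further. A small illustration of the difference:

```python
# str() gives the human-readable rendering; repr() keeps quotes and type info,
# which is less ambiguous once the value is embedded in serialized output.
from datetime import datetime

value = "needs escaping"
print(str(value))   # needs escaping
print(repr(value))  # 'needs escaping'

now = datetime(2025, 6, 2, 18, 20)
print(str(now))     # 2025-06-02 18:20:00
print(repr(now))    # datetime.datetime(2025, 6, 2, 18, 20)
```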

View File

@@ -1,38 +1,10 @@
 import re
-from typing import Any, Dict, Optional
-
-SUPPORTED_PRIMITIVE_TYPES = (str, int, float, bool)
-SUPPORTED_CONTAINER_TYPES = (dict, list)
-SUPPORTED_TYPES = SUPPORTED_PRIMITIVE_TYPES + SUPPORTED_CONTAINER_TYPES
-
-
-def _validate_input_type(val: Any) -> None:
-    """Validates input types recursively (str, int, float, bool, dict, list).
-
-    Args:
-        val: The value to validate
-
-    Raises:
-        ValueError: If the value contains unsupported types
-    """
-    if val is None:
-        return
-    if isinstance(val, SUPPORTED_PRIMITIVE_TYPES):
-        return
-    if isinstance(val, SUPPORTED_CONTAINER_TYPES):
-        for item in val.values() if isinstance(val, dict) else val:
-            _validate_input_type(item)
-        return
-    raise ValueError(
-        f"Unsupported type {type(val).__name__} in inputs. "
-        "Only str, int, float, bool, dict, and list are allowed."
-    )
+from typing import Any, Dict, List, Optional, Union
 
 
 def interpolate_only(
     input_string: Optional[str],
-    inputs: Dict[str, Any],
-    raise_on_missing: bool = True,
+    inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]],
 ) -> str:
     """Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched.
 
     Only interpolates placeholders that follow the pattern {variable_name} where
@@ -53,27 +25,27 @@ def interpolate_only(
         ValueError: If a value contains unsupported types or a template variable is missing
     """
-    from crewai.utilities.serialization import to_serializable
-
-    processed_inputs = {}
+    # Validation function for recursive type checking
+    def validate_type(value: Any) -> None:
+        if value is None:
+            return
+        if isinstance(value, (str, int, float, bool)):
+            return
+        if isinstance(value, (dict, list)):
+            for item in value.values() if isinstance(value, dict) else value:
+                validate_type(item)
+            return
+        raise ValueError(
+            f"Unsupported type {type(value).__name__} in inputs. "
+            "Only str, int, float, bool, dict, and list are allowed."
+        )
 
     # Validate all input values
     for key, value in inputs.items():
-        if value is None or isinstance(value, SUPPORTED_TYPES):
-            try:
-                _validate_input_type(value)
-                processed_inputs[key] = value
-            except ValueError as e:
-                raise ValueError(f"Invalid value for key '{key}': {str(e)}") from e
-        else:
-            try:
-                processed_inputs[key] = to_serializable(value)
-            except Exception as e:
-                raise ValueError(
-                    f"Invalid value for key '{key}': Unable to serialize {type(value).__name__}. "
-                    f"Serialization error: {str(e)}"
-                )
-
-    inputs = processed_inputs
+        try:
+            validate_type(value)
+        except ValueError as e:
+            raise ValueError(f"Invalid value for key '{key}': {str(e)}") from e
 
     if input_string is None or not input_string:
         return ""

File diff suppressed because one or more lines are too long

View File

@@ -4566,96 +4566,3 @@ def test_reset_agent_knowledge_with_only_agent_knowledge(researcher,writer):
     mock_reset_agent_knowledge.assert_called_once_with([mock_ks_research,mock_ks_writer])
 
 
-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_kickoff_with_pandas_dataframe():
-    """Test that crew.kickoff works with pandas DataFrame inputs."""
-    import pandas as pd
-
-    df = pd.DataFrame({
-        "name": ["Alice", "Bob", "Charlie"],
-        "age": [25, 30, 35],
-        "city": ["New York", "London", "Tokyo"]
-    })
-
-    agent = Agent(
-        role="Data Analyst",
-        goal="Analyze the provided data",
-        backstory="You are an expert data analyst",
-    )
-    task = Task(
-        description="Analyze this dataset: {data}",
-        expected_output="A brief summary of the data",
-        agent=agent,
-    )
-    crew = Crew(agents=[agent], tasks=[task])
-
-    result = crew.kickoff(inputs={"data": df})
-
-    assert result is not None
-    assert "Alice" in str(result) or "Bob" in str(result)
-
-
-def test_crew_inputs_interpolate_with_dataframe():
-    """Test that input interpolation works with pandas DataFrames."""
-    import pandas as pd
-
-    df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
-
-    agent = Agent(
-        role="Analyst",
-        goal="Process {data_type} data",
-        backstory="Expert in {data_type} analysis",
-    )
-    task = Task(
-        description="Process this data: {dataset}",
-        expected_output="Analysis of {dataset}",
-        agent=agent,
-    )
-    crew = Crew(agents=[agent], tasks=[task])
-
-    inputs = {"data_type": "tabular", "dataset": df}
-    crew._interpolate_inputs(inputs=inputs)
-
-    assert "tabular" in crew.agents[0].goal
-    assert "tabular" in crew.agents[0].backstory
-    assert str(df) in crew.tasks[0].description
-    assert str(df) in crew.tasks[0].expected_output
-
-
-def test_crew_inputs_interpolate_mixed_types_with_dataframe():
-    """Test input interpolation with mixed types including DataFrames."""
-    import pandas as pd
-
-    df = pd.DataFrame({"values": [10, 20, 30]})
-
-    agent = Agent(
-        role="{role_name}",
-        goal="Analyze {count} records",
-        backstory="Expert with {dataset}",
-    )
-    task = Task(
-        description="Process {dataset} with {count} records",
-        expected_output="{count} insights from {dataset}",
-        agent=agent,
-    )
-    crew = Crew(agents=[agent], tasks=[task])
-
-    inputs = {
-        "role_name": "Data Scientist",
-        "count": 3,
-        "dataset": df
-    }
-    crew._interpolate_inputs(inputs=inputs)
-
-    assert crew.agents[0].role == "Data Scientist"
-    assert "3" in crew.agents[0].goal
-    assert str(df) in crew.agents[0].backstory
-    assert str(df) in crew.tasks[0].description
-    assert "3" in crew.tasks[0].expected_output

View File

@@ -1279,40 +1279,54 @@ def test_interpolate_complex_combination():
 def test_interpolate_invalid_type_validation():
-    # Test with type that fails serialization
-    class UnserializableObject:
-        def __str__(self):
-            raise Exception("Cannot serialize")
-
-        def __repr__(self):
-            raise Exception("Cannot serialize")
-
-    with pytest.raises(ValueError, match="Unable to serialize UnserializableObject"):
-        interpolate_only("{data}", {"data": UnserializableObject()})
-
-    result = interpolate_only("{data}", {"data": {1, 2, 3}})
-    assert "1" in result and "2" in result and "3" in result
+    # Test with invalid top-level type
+    with pytest.raises(ValueError) as excinfo:
+        interpolate_only("{data}", {"data": set()})  # type: ignore we are purposely testing this failure
+    assert "Unsupported type set" in str(excinfo.value)
+
+    # Test with invalid nested type
+    invalid_nested = {
+        "profile": {
+            "name": "John",
+            "age": 30,
+            "tags": {"a", "b", "c"},  # Set is invalid
+        }
+    }
+    with pytest.raises(ValueError) as excinfo:
+        interpolate_only("{data}", {"data": invalid_nested})
+    assert "Unsupported type set" in str(excinfo.value)
 
 
 def test_interpolate_custom_object_validation():
-    class SerializableCustomObject:
+    class CustomObject:
         def __init__(self, value):
             self.value = value
 
         def __str__(self):
             return str(self.value)
 
-    class UnserializableCustomObject:
-        def __init__(self, value):
-            self.value = value
-
-        def __str__(self):
-            raise Exception("Cannot serialize")
-
-        def __repr__(self):
-            raise Exception("Cannot serialize")
-
-    result = interpolate_only("{obj}", {"obj": SerializableCustomObject(5)})
-    assert "5" in result
-
-    with pytest.raises(ValueError, match="Unable to serialize UnserializableCustomObject"):
-        interpolate_only("{obj}", {"obj": UnserializableCustomObject(5)})
+    # Test with custom object at top level
+    with pytest.raises(ValueError) as excinfo:
+        interpolate_only("{obj}", {"obj": CustomObject(5)})  # type: ignore we are purposely testing this failure
+    assert "Unsupported type CustomObject" in str(excinfo.value)
+
+    # Test with nested custom object in dictionary
+    with pytest.raises(ValueError) as excinfo:
+        interpolate_only("{data}", {"data": {"valid": 1, "invalid": CustomObject(5)}})
+    assert "Unsupported type CustomObject" in str(excinfo.value)
+
+    # Test with nested custom object in list
+    with pytest.raises(ValueError) as excinfo:
+        interpolate_only("{data}", {"data": [1, "valid", CustomObject(5)]})
+    assert "Unsupported type CustomObject" in str(excinfo.value)
+
+    # Test with deeply nested custom object
+    with pytest.raises(ValueError) as excinfo:
+        interpolate_only(
+            "{data}", {"data": {"level1": {"level2": [{"level3": CustomObject(5)}]}}}
+        )
+    assert "Unsupported type CustomObject" in str(excinfo.value)
 
 
 def test_interpolate_valid_complex_types():

View File

@@ -1,7 +1,6 @@
 from typing import Any, Dict, List, Union
 
 import pytest
-import pandas as pd
 
 from crewai.utilities.string_utils import interpolate_only
@@ -91,19 +90,16 @@ class TestInterpolateOnly:
assert "name" in str(excinfo.value)
def test_invalid_input_types(self):
"""Test that an error is raised when serialization fails."""
class UnserializableObject:
def __str__(self):
raise Exception("Cannot convert to string")
def __repr__(self):
raise Exception("Cannot convert to string")
"""Test that an error is raised with invalid input types."""
template = "Hello, {name}!"
inputs: Dict[str, Any] = {"name": UnserializableObject()}
# Using Any for this test since we're intentionally testing an invalid type
inputs: Dict[str, Any] = {"name": object()} # Object is not a valid input type
with pytest.raises(ValueError, match="Unable to serialize UnserializableObject"):
with pytest.raises(ValueError) as excinfo:
interpolate_only(template, inputs)
assert "unsupported type" in str(excinfo.value).lower()
def test_empty_input_string(self):
"""Test handling of empty or None input string."""
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
@@ -185,86 +181,7 @@ class TestInterpolateOnly:
template = "Hello, {name}!"
inputs: Dict[str, Any] = {}
with pytest.raises(ValueError):
with pytest.raises(ValueError) as excinfo:
interpolate_only(template, inputs)
def test_interpolate_only_with_dataframe(self):
"""Test that interpolate_only handles pandas DataFrames correctly."""
df = pd.DataFrame({"name": ["Alice", "Bob"], "age": [25, 30]})
result = interpolate_only("Data: {data}", {"data": df})
assert "Alice" in result
assert "Bob" in result
assert "25" in result
assert "30" in result
def test_interpolate_only_mixed_types_with_dataframe(self):
"""Test interpolate_only with mixed input types including DataFrame."""
df = pd.DataFrame({"col": [1, 2, 3]})
inputs = {
"text": "hello",
"number": 42,
"flag": True,
"data": df,
"items": [1, 2, 3]
}
template = "Text: {text}, Number: {number}, Flag: {flag}, Data: {data}, Items: {items}"
result = interpolate_only(template, inputs)
assert "hello" in result
assert "42" in result
assert "True" in result
assert "col" in result
assert "[1, 2, 3]" in result
def test_interpolate_only_unsupported_type_error(self):
"""Test that interpolate_only handles unsupported types gracefully."""
class CustomObject:
def __str__(self):
raise Exception("Cannot serialize")
def __repr__(self):
raise Exception("Cannot serialize")
with pytest.raises(ValueError, match="Unable to serialize CustomObject"):
interpolate_only("Value: {obj}", {"obj": CustomObject()})
def test_interpolate_only_complex_dataframe(self):
"""Test interpolate_only with more complex DataFrame structures."""
df = pd.DataFrame({
"product": ["Widget A", "Widget B", "Widget C"],
"sales": [100, 150, 200],
"region": ["North", "South", "East"]
})
result = interpolate_only("Sales report: {report}", {"report": df})
assert "Widget A" in result
assert "100" in result
assert "North" in result
assert "sales" in result
assert "product" in result
def test_interpolate_only_backward_compatibility(self):
"""Test that existing supported types still work correctly."""
inputs = {
"text": "hello",
"number": 42,
"float_val": 3.14,
"flag": True,
"nested": {"key": "value"},
"items": [1, 2, 3]
}
template = "Text: {text}, Number: {number}, Float: {float_val}, Flag: {flag}, Nested: {nested}, Items: {items}"
result = interpolate_only(template, inputs)
assert "hello" in result
assert "42" in result
assert "3.14" in result
assert "True" in result
assert "key" in result
assert "[1, 2, 3]" in result
assert "inputs dictionary cannot be empty" in str(excinfo.value).lower()

uv.lock (generated): 1350 changed lines

File diff suppressed because it is too large