Compare commits


1 Commit

Author: lorenzejay
SHA1: 801a424b99
Message: Update version to 0.130.0 and dependencies in pyproject.toml and uv.lock
- Bump CrewAI version from 0.126.0 to 0.130.0 in pyproject.toml and uv.lock.
- Update optional dependency 'crewai-tools' version from 0.46.0 to 0.47.1.
- Adjust dependency specifications in CLI templates to reflect the new version.
Date: 2025-06-11 16:38:40 -07:00
8 changed files with 138 additions and 288 deletions
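Since this is purely a version and dependency bump, a quick sanity check after upgrading is to confirm the installed package reports the new release. A minimal sketch (it assumes only that `crewai` exposes `__version__`, which the `__init__.py` diff below sets to 0.130.0):

```python
import crewai

# After upgrading, the package should report the bumped release.
assert crewai.__version__ == "0.130.0", crewai.__version__
print("crewai", crewai.__version__)
```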

View File

@@ -1,6 +1,6 @@
[project]
name = "crewai"
version = "0.126.0"
version = "0.130.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<3.14"
@@ -47,7 +47,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = ["crewai-tools~=0.46.0"]
tools = ["crewai-tools~=0.47.1"]
embeddings = [
"tiktoken~=0.8.0"
]
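A note on the `~=0.47.1` specifier used for the optional `crewai-tools` dependency: it allows patch releases within the 0.47 series but excludes 0.48 and later. A small illustrative check using the `packaging` library (assumed available; it is not part of this diff):

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.47.1")  # equivalent to >=0.47.1, ==0.47.*
print("0.47.1" in spec)  # True
print("0.47.9" in spec)  # True  - patch bumps stay in range
print("0.48.0" in spec)  # False - minor bumps are excluded
```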

View File

@@ -1,20 +1,16 @@
import warnings
from crewai.agent import Agent
-from crewai import agent
-from crewai import cli
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
-from crewai import knowledge
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.llm_guardrail import LLMGuardrail
from crewai.tasks.task_output import TaskOutput
-from crewai import utilities
warnings.filterwarnings(
"ignore",
@@ -22,11 +18,9 @@ warnings.filterwarnings(
category=UserWarning,
module="pydantic.main",
)
__version__ = "0.126.0"
__version__ = "0.130.0"
__all__ = [
"Agent",
"agent",
"cli",
"Crew",
"CrewOutput",
"Process",
@@ -35,8 +29,6 @@ __all__ = [
"BaseLLM",
"Flow",
"Knowledge",
"knowledge",
"TaskOutput",
"LLMGuardrail",
"utilities",
]
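One practical note on the `__init__.py` change above: dropping the lowercase submodule names from the imports and from `__all__` mainly affects star imports and what the package eagerly loads; explicit submodule imports keep working. A small illustrative sketch (none of this is part of the diff itself):

```python
# Class-level re-exports are untouched by this change.
from crewai import Agent, Crew, Process, Task

# Explicit submodule imports still resolve through the normal import
# machinery, even though crewai/__init__.py no longer imports them or
# lists them in __all__.
from crewai import knowledge  # noqa: F401
import crewai.cli             # noqa: F401

# Star imports, however, now follow the trimmed __all__, so names like
# "agent", "cli", "knowledge", and "utilities" are no longer pulled in by
# `from crewai import *`.
```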

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.126.0,<1.0.0"
"crewai[tools]>=0.130.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.126.0,<1.0.0",
"crewai[tools]>=0.130.0,<1.0.0",
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.126.0"
"crewai[tools]>=0.130.0"
]
[tool.crewai]

View File

@@ -1,6 +1,8 @@
import json
import logging
import os
+import sys
+import threading
import warnings
from collections import defaultdict
from contextlib import contextmanager
@@ -46,7 +48,8 @@ with warnings.catch_warnings():
from litellm.utils import supports_response_schema
import io
from typing import TextIO
from crewai.llms.base_llm import BaseLLM
from crewai.utilities.events import crewai_event_bus
@@ -57,7 +60,69 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
load_dotenv()
+class FilteredStream(io.TextIOBase):
+    _lock = None
+
+    def __init__(self, original_stream: TextIO):
+        self._original_stream = original_stream
+        self._lock = threading.Lock()
+
+    def write(self, s: str) -> int:
+        if not self._lock:
+            self._lock = threading.Lock()
+        with self._lock:
+            lower_s = s.lower()
+            # Skip common noisy LiteLLM banners and any other lines that contain "litellm"
+            if (
+                "give feedback / get help" in lower_s
+                or "litellm.info:" in lower_s
+                or "litellm" in lower_s
+                or "Consider using a smaller input or implementing a text splitting strategy" in lower_s
+            ):
+                return 0
+            return self._original_stream.write(s)
+
+    def flush(self):
+        with self._lock:
+            return self._original_stream.flush()
+
+    def __getattr__(self, name):
+        """Delegate attribute access to the wrapped original stream.
+
+        This ensures compatibility with libraries (e.g., Rich) that rely on
+        attributes such as `encoding`, `isatty`, `buffer`, etc., which may not
+        be explicitly defined on this proxy class.
+        """
+        return getattr(self._original_stream, name)
+
+    # Delegate common properties/methods explicitly so they aren't shadowed by
+    # the TextIOBase defaults (e.g., .encoding returns None by default, which
+    # confuses Rich). These explicit pass-throughs ensure the wrapped Console
+    # still sees a fully-featured stream.
+    @property
+    def encoding(self):
+        return getattr(self._original_stream, "encoding", "utf-8")
+
+    def isatty(self):
+        return self._original_stream.isatty()
+
+    def fileno(self):
+        return self._original_stream.fileno()
+
+    def writable(self):
+        return True
+
+
+# Apply the filtered stream globally so that any subsequent writes containing the filtered
+# keywords (e.g., "litellm") are hidden from terminal output. We guard against double
+# wrapping to ensure idempotency in environments where this module might be reloaded.
+if not isinstance(sys.stdout, FilteredStream):
+    sys.stdout = FilteredStream(sys.stdout)
+if not isinstance(sys.stderr, FilteredStream):
+    sys.stderr = FilteredStream(sys.stderr)
LLM_CONTEXT_WINDOW_SIZES = {
@@ -201,40 +266,6 @@ def suppress_warnings():
yield
-@contextmanager
-def suppress_litellm_output():
-    """Contextually suppress litellm-related logging output during LLM calls."""
-    litellm_logger = logging.getLogger("litellm")
-    original_level = litellm_logger.level
-
-    warning_patterns = [
-        ".*give feedback.*",
-        ".*Consider using a smaller input.*",
-        ".*litellm\\.info:.*",
-        ".*text splitting strategy.*"
-    ]
-
-    try:
-        with warnings.catch_warnings():
-            for pattern in warning_patterns:
-                warnings.filterwarnings("ignore", message=pattern)
-
-            try:
-                litellm_logger.setLevel(logging.WARNING)
-            except Exception as e:
-                logging.debug(f"Error setting logger level: {e}")
-
-            yield
-    except Exception as e:
-        logging.debug(f"Error in litellm output suppression: {e}")
-        raise
-    finally:
-        try:
-            litellm_logger.setLevel(original_level)
-        except Exception as e:
-            logging.debug(f"Error restoring logger level: {e}")
class Delta(TypedDict):
content: Optional[str]
role: Optional[str]
@@ -419,61 +450,60 @@ class LLM(BaseLLM):
try:
# --- 3) Process each chunk in the stream
with suppress_litellm_output():
for chunk in litellm.completion(**params):
chunk_count += 1
last_chunk = chunk
for chunk in litellm.completion(**params):
chunk_count += 1
last_chunk = chunk
# Extract content from the chunk
chunk_content = None
# Extract content from the chunk
chunk_content = None
# Safely extract content from various chunk formats
try:
# Try to access choices safely
choices = None
if isinstance(chunk, dict) and "choices" in chunk:
choices = chunk["choices"]
elif hasattr(chunk, "choices"):
# Check if choices is not a type but an actual attribute with value
if not isinstance(getattr(chunk, "choices"), type):
choices = getattr(chunk, "choices")
# Safely extract content from various chunk formats
try:
# Try to access choices safely
choices = None
if isinstance(chunk, dict) and "choices" in chunk:
choices = chunk["choices"]
elif hasattr(chunk, "choices"):
# Check if choices is not a type but an actual attribute with value
if not isinstance(getattr(chunk, "choices"), type):
choices = getattr(chunk, "choices")
# Try to extract usage information if available
if isinstance(chunk, dict) and "usage" in chunk:
usage_info = chunk["usage"]
elif hasattr(chunk, "usage"):
# Check if usage is not a type but an actual attribute with value
if not isinstance(getattr(chunk, "usage"), type):
usage_info = getattr(chunk, "usage")
# Try to extract usage information if available
if isinstance(chunk, dict) and "usage" in chunk:
usage_info = chunk["usage"]
elif hasattr(chunk, "usage"):
# Check if usage is not a type but an actual attribute with value
if not isinstance(getattr(chunk, "usage"), type):
usage_info = getattr(chunk, "usage")
if choices and len(choices) > 0:
choice = choices[0]
if choices and len(choices) > 0:
choice = choices[0]
# Handle different delta formats
delta = None
if isinstance(choice, dict) and "delta" in choice:
delta = choice["delta"]
elif hasattr(choice, "delta"):
delta = getattr(choice, "delta")
# Handle different delta formats
delta = None
if isinstance(choice, dict) and "delta" in choice:
delta = choice["delta"]
elif hasattr(choice, "delta"):
delta = getattr(choice, "delta")
# Extract content from delta
if delta:
# Handle dict format
if isinstance(delta, dict):
if "content" in delta and delta["content"] is not None:
chunk_content = delta["content"]
# Handle object format
elif hasattr(delta, "content"):
chunk_content = getattr(delta, "content")
# Extract content from delta
if delta:
# Handle dict format
if isinstance(delta, dict):
if "content" in delta and delta["content"] is not None:
chunk_content = delta["content"]
# Handle object format
elif hasattr(delta, "content"):
chunk_content = getattr(delta, "content")
# Handle case where content might be None or empty
if chunk_content is None and isinstance(delta, dict):
# Some models might send empty content chunks
chunk_content = ""
# Handle case where content might be None or empty
if chunk_content is None and isinstance(delta, dict):
# Some models might send empty content chunks
chunk_content = ""
# Enable tool calls using streaming
if "tool_calls" in delta:
tool_calls = delta["tool_calls"]
# Enable tool calls using streaming
if "tool_calls" in delta:
tool_calls = delta["tool_calls"]
if tool_calls:
result = self._handle_streaming_tool_calls(
@@ -484,22 +514,21 @@ class LLM(BaseLLM):
if result is not None:
chunk_content = result
except Exception as e:
logging.error(f"Error extracting content from chunk: {e}", exc_info=True)
logging.debug(f"Chunk format: {type(chunk)}, content: {chunk}")
continue
except Exception as e:
logging.debug(f"Error extracting content from chunk: {e}")
logging.debug(f"Chunk format: {type(chunk)}, content: {chunk}")
# Only add non-None content to the response
if chunk_content is not None:
# Add the chunk content to the full response
full_response += chunk_content
# Only add non-None content to the response
if chunk_content is not None:
# Add the chunk content to the full response
full_response += chunk_content
# Emit the chunk event
assert hasattr(crewai_event_bus, "emit")
crewai_event_bus.emit(
self,
event=LLMStreamChunkEvent(chunk=chunk_content),
)
# Emit the chunk event
assert hasattr(crewai_event_bus, "emit")
crewai_event_bus.emit(
self,
event=LLMStreamChunkEvent(chunk=chunk_content),
)
# --- 4) Fallback to non-streaming if no content received
if not full_response.strip() and chunk_count == 0:
logging.warning(
@@ -736,8 +765,7 @@ class LLM(BaseLLM):
# and convert them to our own exception type for consistent handling
# across the codebase. This allows CrewAgentExecutor to handle context
# length issues appropriately.
-with suppress_litellm_output():
-    response = litellm.completion(**params)
+response = litellm.completion(**params)
except ContextWindowExceededError as e:
# Convert litellm's context window error to our own exception type
# for consistent handling in the rest of the codebase
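The main behavioral change in `llm.py` is reinstating a module-level stdout/stderr proxy that drops LiteLLM noise, in place of the contextual `suppress_litellm_output()` manager removed above. A stripped-down, self-contained sketch of that proxy pattern (class and function names here are illustrative, not the CrewAI implementation):

```python
import io
import sys


class NoiseFilter(io.TextIOBase):
    """Minimal stdout proxy that swallows lines containing given keywords."""

    def __init__(self, original, keywords=("litellm",)):
        self._original = original
        self._keywords = tuple(k.lower() for k in keywords)

    def write(self, s: str) -> int:
        if any(k in s.lower() for k in self._keywords):
            return 0  # pretend the write happened, but drop the content
        return self._original.write(s)

    def __getattr__(self, name):
        # Anything not defined here (or on TextIOBase) is delegated to the
        # real stream, which keeps the wrapped object usable by libraries
        # that introspect stdout.
        return getattr(self._original, name)


def demo():
    if not isinstance(sys.stdout, NoiseFilter):  # guard against double wrapping
        sys.stdout = NoiseFilter(sys.stdout)
    print("LiteLLM.Info: banner that gets dropped")
    print("normal output that passes through")
    sys.stdout = sys.stdout._original  # undo the wrapping for the rest of the process


if __name__ == "__main__":
    demo()
```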

View File

@@ -1,171 +0,0 @@
"""Test to reproduce and verify fix for issue #3000: sys.stdout/stderr hijacking."""
import sys
import io
from unittest.mock import patch, MagicMock
import pytest
def test_crewai_hijacks_sys_streams():
"""Test that importing crewai.llm currently hijacks sys.stdout and sys.stderr (before fix)."""
original_stdout = sys.stdout
original_stderr = sys.stderr
import crewai.llm # noqa: F401
try:
assert sys.stdout is not original_stdout, "sys.stdout should be hijacked by FilteredStream"
assert sys.stderr is not original_stderr, "sys.stderr should be hijacked by FilteredStream"
assert hasattr(sys.stdout, '_original_stream'), "sys.stdout should be wrapped by FilteredStream"
assert hasattr(sys.stderr, '_original_stream'), "sys.stderr should be wrapped by FilteredStream"
assert False, "The fix didn't work - streams are still being hijacked"
except AssertionError:
pass
def test_litellm_output_is_filtered():
"""Test that litellm-related output is currently filtered (before fix)."""
import crewai.llm # noqa: F401
captured_output = io.StringIO()
test_strings = [
"litellm.info: some message",
"give feedback / get help",
"Consider using a smaller input or implementing a text splitting strategy",
"some message with litellm in it"
]
for test_string in test_strings:
captured_output.seek(0)
captured_output.truncate(0)
original_stdout = sys.stdout
sys.stdout = captured_output
try:
print(test_string, end='')
assert captured_output.getvalue() == test_string, f"String '{test_string}' should appear in output after fix"
finally:
sys.stdout = original_stdout
def test_normal_output_passes_through():
"""Test that normal output passes through correctly after the fix."""
import crewai.llm # noqa: F401
captured_output = io.StringIO()
original_stdout = sys.stdout
sys.stdout = captured_output
try:
test_string = "This is normal output that should pass through"
print(test_string, end='')
assert captured_output.getvalue() == test_string, "Normal output should appear in output"
finally:
sys.stdout = original_stdout
def test_crewai_does_not_hijack_sys_streams_after_fix():
"""Test that after the fix, importing crewai.llm does NOT hijack sys.stdout and sys.stderr."""
original_stdout = sys.stdout
original_stderr = sys.stderr
if 'crewai.llm' in sys.modules:
del sys.modules['crewai.llm']
if 'crewai' in sys.modules:
del sys.modules['crewai']
import crewai.llm # noqa: F401
assert sys.stdout is original_stdout, "sys.stdout should NOT be hijacked after fix"
assert sys.stderr is original_stderr, "sys.stderr should NOT be hijacked after fix"
assert not hasattr(sys.stdout, '_original_stream'), "sys.stdout should not be wrapped after fix"
assert not hasattr(sys.stderr, '_original_stream'), "sys.stderr should not be wrapped after fix"
def test_litellm_output_still_suppressed_during_llm_calls():
"""Test that litellm output is still suppressed during actual LLM calls after the fix."""
from crewai.llm import LLM
captured_stdout = io.StringIO()
captured_stderr = io.StringIO()
with patch('sys.stdout', captured_stdout), patch('sys.stderr', captured_stderr):
with patch('litellm.completion') as mock_completion:
mock_completion.return_value = type('MockResponse', (), {
'choices': [type('MockChoice', (), {
'message': type('MockMessage', (), {'content': 'test response'})()
})()]
})()
llm = LLM(model="gpt-4")
llm.call([{"role": "user", "content": "test"}])
output = captured_stdout.getvalue() + captured_stderr.getvalue()
assert "litellm" not in output.lower(), "litellm output should still be suppressed during calls"
def test_concurrent_llm_calls():
"""Test that contextual suppression works correctly with concurrent calls."""
import threading
from crewai.llm import LLM
results = []
def make_llm_call():
with patch('litellm.completion') as mock_completion:
mock_completion.return_value = type('MockResponse', (), {
'choices': [type('MockChoice', (), {
'message': type('MockMessage', (), {'content': 'test response'})()
})()]
})()
llm = LLM(model="gpt-4")
result = llm.call([{"role": "user", "content": "test"}])
results.append(result)
threads = [threading.Thread(target=make_llm_call) for _ in range(3)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert len(results) == 3
assert all("test response" in result for result in results)
def test_logger_performance():
"""Test that logger operations work correctly without global caching."""
from crewai.llm import suppress_litellm_output
with patch('logging.getLogger') as mock_get_logger:
mock_logger = MagicMock()
mock_get_logger.return_value = mock_logger
with suppress_litellm_output():
pass
with suppress_litellm_output():
pass
assert mock_get_logger.call_count == 2
mock_get_logger.assert_called_with("litellm")
def test_suppression_error_handling():
"""Test that suppression continues even if logger operations fail."""
from crewai.llm import suppress_litellm_output
with patch('logging.getLogger') as mock_get_logger:
mock_logger = MagicMock()
mock_logger.setLevel.side_effect = Exception("Logger error")
mock_get_logger.return_value = mock_logger
try:
with suppress_litellm_output():
result = "operation completed"
assert result == "operation completed"
except Exception:
pytest.fail("Suppression should not fail even if logger operations fail")

uv.lock generated
View File

@@ -725,7 +725,7 @@ wheels = [
[[package]]
name = "crewai"
version = "0.126.0"
version = "0.130.0"
source = { editable = "." }
dependencies = [
{ name = "appdirs" },
@@ -814,7 +814,7 @@ requires-dist = [
{ name = "blinker", specifier = ">=1.9.0" },
{ name = "chromadb", specifier = ">=0.5.23" },
{ name = "click", specifier = ">=8.1.7" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.46.0" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.47.1" },
{ name = "docling", marker = "extra == 'docling'", specifier = ">=2.12.0" },
{ name = "instructor", specifier = ">=1.3.3" },
{ name = "json-repair", specifier = ">=0.25.2" },
@@ -866,7 +866,7 @@ dev = [
[[package]]
name = "crewai-tools"
version = "0.46.0"
version = "0.47.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "chromadb" },
@@ -880,10 +880,11 @@ dependencies = [
{ name = "pyright" },
{ name = "pytube" },
{ name = "requests" },
{ name = "tiktoken" },
]
sdist = { url = "https://files.pythonhosted.org/packages/0d/9e/69109f5d5b398b2edeccec1055e93cdceac3becd04407bcce97de6557180/crewai_tools-0.46.0.tar.gz", hash = "sha256:c8f89247199d528c77db4b450a1ca781b5d32405982467baf516ede4b2045bd1", size = 913715 }
sdist = { url = "https://files.pythonhosted.org/packages/0e/cd/fc5a96be8c108febcc2c767714e3ec9b70cca9be8e6b29bba7c1874fb6d2/crewai_tools-0.47.1.tar.gz", hash = "sha256:4de5ebf320aeae317ffabe2e4704b98b5d791f663196871fb5ad2e7bbea14a82", size = 921418 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ab/62/0b68637ce820fbb0385495bd6d75ceb27de53f060df5417f293419826481/crewai_tools-0.46.0-py3-none-any.whl", hash = "sha256:f8e60723869ca36ede7b43dcc1491ebefc93410a972d97b7b0ce59c4bd7a826b", size = 606190 },
{ url = "https://files.pythonhosted.org/packages/e3/2c/05d9fa584d9d814c0c8c4c3793df572222417695fe3d716f14bc274376d6/crewai_tools-0.47.1-py3-none-any.whl", hash = "sha256:4dc9bb0a08e3afa33c6b9efb163e47181801a7906be7241977426e6d3dec0a05", size = 606305 },
]
[[package]]