Compare commits

..

1 Commits

Author SHA1 Message Date
Devin AI
8b52e05006 feat: update LiteLLM dependency to >=1.77.4 to address CVEs
- Updated LiteLLM from ==1.74.9 to >=1.77.4
- Added test to verify LiteLLM integration works with new version
- Addresses security vulnerabilities mentioned in issue #3602

Co-Authored-By: João <joao@crewai.com>
2025-09-26 15:35:48 +00:00
11 changed files with 3263 additions and 3280 deletions

View File

@@ -11,7 +11,7 @@ dependencies = [
# Core Dependencies
"pydantic>=2.11.9",
"openai>=1.13.3",
"litellm==1.74.9",
"litellm>=1.77.4",
"instructor>=1.3.3",
# Text Processing
"pdfplumber>=0.11.4",

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
__version__ = "0.201.1"
__version__ = "0.201.0"
_telemetry_submitted = False

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.201.1,<1.0.0"
"crewai[tools]>=0.201.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.201.1,<1.0.0",
"crewai[tools]>=0.201.0,<1.0.0",
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.201.1"
"crewai[tools]>=0.201.0"
]
[tool.crewai]

View File

@@ -140,10 +140,3 @@ class EmbeddingFunction(Protocol[D]):
return validate_embeddings(normalized)
cls.__call__ = wrapped_call # type: ignore[method-assign]
def embed_query(self, input: D) -> Embeddings:
"""
Get the embeddings for a query input.
This method is optional, and if not implemented, the default behavior is to call __call__.
"""
return self.__call__(input=input)

View File

@@ -2,9 +2,10 @@
from typing import cast
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
from typing_extensions import Unpack
from crewai.rag.core.base_embeddings_callable import EmbeddingFunction
from crewai.rag.core.types import Documents, Embeddings
from crewai.rag.embeddings.providers.ibm.types import WatsonXProviderConfig
@@ -17,14 +18,8 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
Args:
**kwargs: Configuration parameters for WatsonX Embeddings and Credentials.
"""
super().__init__(**kwargs)
self._config = kwargs
@staticmethod
def name() -> str:
"""Return the name of the embedding function for ChromaDB compatibility."""
return "watsonx"
def __call__(self, input: Documents) -> Embeddings:
"""Generate embeddings for input documents.

View File

@@ -2,9 +2,10 @@
from typing import cast
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
from typing_extensions import Unpack
from crewai.rag.core.base_embeddings_callable import EmbeddingFunction
from crewai.rag.core.types import Documents, Embeddings
from crewai.rag.embeddings.providers.voyageai.types import VoyageAIProviderConfig
@@ -32,11 +33,6 @@ class VoyageAIEmbeddingFunction(EmbeddingFunction[Documents]):
timeout=kwargs.get("timeout"),
)
@staticmethod
def name() -> str:
"""Return the name of the embedding function for ChromaDB compatibility."""
return "voyageai"
def __call__(self, input: Documents) -> Embeddings:
"""Generate embeddings for input documents.

View File

@@ -11,10 +11,7 @@ from crewai.rag.embeddings.providers.google.types import (
VertexAIProviderSpec,
)
from crewai.rag.embeddings.providers.huggingface.types import HuggingFaceProviderSpec
from crewai.rag.embeddings.providers.ibm.types import (
WatsonProviderSpec,
WatsonXProviderSpec,
)
from crewai.rag.embeddings.providers.ibm.types import WatsonXProviderSpec
from crewai.rag.embeddings.providers.instructor.types import InstructorProviderSpec
from crewai.rag.embeddings.providers.jina.types import JinaProviderSpec
from crewai.rag.embeddings.providers.microsoft.types import AzureProviderSpec
@@ -47,7 +44,6 @@ ProviderSpec = (
| Text2VecProviderSpec
| VertexAIProviderSpec
| VoyageAIProviderSpec
| WatsonProviderSpec # Deprecated, use WatsonXProviderSpec
| WatsonXProviderSpec
)

View File

@@ -3,6 +3,7 @@ import os
from time import sleep
from unittest.mock import MagicMock, patch
import litellm
import pytest
from pydantic import BaseModel
@@ -711,3 +712,28 @@ def test_ollama_does_not_modify_when_last_is_user(ollama_llm):
formatted = ollama_llm._format_messages_for_provider(original_messages)
assert formatted == original_messages
def test_litellm_version_is_updated():
"""Test that LiteLLM version is >= 1.77.4 to address CVEs."""
import importlib.metadata
litellm_version = importlib.metadata.version("litellm")
version_parts = [int(x) for x in litellm_version.split('.')]
# Check that version is >= 1.77.4
assert version_parts[0] >= 1
if version_parts[0] == 1:
assert version_parts[1] >= 77
if version_parts[1] == 77:
assert version_parts[2] >= 4
def test_litellm_import_and_basic_functionality():
"""Test that LiteLLM can be imported and basic functionality works."""
assert hasattr(litellm, 'completion')
assert hasattr(litellm, 'get_supported_openai_params')
supported_params = litellm.get_supported_openai_params("gpt-4")
assert isinstance(supported_params, list)
assert len(supported_params) > 0

uv.lock (6477 lines changed) — generated file

File diff suppressed because it is too large. [Load Diff]