feat: improve native SDK import support (#3725)

* feat: add support for Anthropic provider and enhance logging

- Added an optional `anthropic` extra (`anthropic>=0.69.0`) to `pyproject.toml` and `uv.lock`, enabling integration with the Anthropic API.
- Updated logging in the LLM class to provide clearer error messages when importing native providers, enhancing debugging capabilities.
- Improved error handling in the AnthropicCompletion class to guide users on installation via the updated error message format.
- Refactored import error handling in the other provider classes so error messages and installation instructions stay consistent (a brief usage sketch follows this list).
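
A quick usage sketch, not part of this PR's diff; the import path and model ID are taken from the updated tests:

```python
from crewai.llm import LLM

# With the new extra installed (`uv add "crewai[anthropic]"`), an "anthropic/..."
# model resolves to the native AnthropicCompletion provider. Without the SDK,
# construction raises an ImportError whose message points at the extra to install.
llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")
```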

* feat: enhance LLM support with Bedrock provider and update dependencies

- Added support for the `bedrock` provider in the LLM class, allowing integration with AWS Bedrock APIs.
- Renamed the `boto3` optional-dependency extra to `bedrock` in `pyproject.toml` and `uv.lock`, reflecting the new provider structure.
- Introduced `SUPPORTED_NATIVE_PROVIDERS` to include `bedrock` and ensure proper error handling when instantiating native providers.
- Enhanced error handling in the LLM class to raise informative errors when native provider instantiation fails.
- Added tests to validate the behavior of the new Bedrock provider and to ensure the fallback mechanism still works for unsupported providers (see the sketch below).
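
A minimal sketch of the resulting behavior, assuming the `bedrock` extra (boto3) is installed; the model IDs and the fallback provider are copied from the tests added in this PR:

```python
from crewai.llm import LLM

# "bedrock" is in SUPPORTED_NATIVE_PROVIDERS, so this resolves to the native
# BedrockCompletion class. If boto3 is missing or initialization fails, an
# ImportError is raised instead of silently falling back to LiteLLM.
bedrock_llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")

# Providers that are not in SUPPORTED_NATIVE_PROVIDERS still route through LiteLLM.
fallback_llm = LLM(model="groq/llama-3.1-70b-versatile", is_litellm=False)
assert fallback_llm.is_litellm is True
```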

* test: update native provider fallback tests to expect ImportError

* adjust the tests to the expected behavior: raising ImportError (see the sketch below)

* this test expects the LiteLLM format; all Gemini native tests live in test_google.py
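
The updated tests follow this pattern; the sketch below is a condensed mirror of the new assertions, and the `FailingCompletion` helper and its error message are illustrative stand-ins:

```python
import pytest
from unittest.mock import patch

from crewai.llm import LLM


def test_native_provider_failure_raises_import_error():
    # Stand-in for a native provider whose SDK is missing or misconfigured.
    class FailingCompletion:
        def __init__(self, *args, **kwargs):
            raise RuntimeError("native SDK failed")

    with patch("crewai.llm.LLM._get_native_provider", return_value=FailingCompletion):
        # No silent fallback to LiteLLM anymore: the failure surfaces as ImportError.
        with pytest.raises(ImportError) as excinfo:
            LLM(model="openai/gpt-4o")

    assert "Error importing native provider" in str(excinfo.value)
```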

---------

Co-authored-by: Greyson LaLonde <greyson.r.lalonde@gmail.com>
Authored by Lorenze Jay on 2025-10-17 14:23:50 -07:00; committed by GitHub
parent c35a84de82
commit fa53a995e4
13 changed files with 128 additions and 91 deletions


@@ -84,7 +84,7 @@ voyageai = [
 litellm = [
     "litellm>=1.74.9",
 ]
-boto3 = [
+bedrock = [
     "boto3>=1.40.45",
 ]
 google-genai = [
@@ -93,6 +93,9 @@ google-genai = [
 azure-ai-inference = [
     "azure-ai-inference>=1.0.0b9",
 ]
+anthropic = [
+    "anthropic>=0.69.0",
+]
 [project.scripts]


@@ -72,7 +72,7 @@ except ImportError:
 load_dotenv()
+logger = logging.getLogger(__name__)
 if LITELLM_AVAILABLE:
     litellm.suppress_debug_info = True
@@ -273,6 +273,17 @@ LLM_CONTEXT_WINDOW_SIZES: Final[dict[str, int]] = {
 DEFAULT_CONTEXT_WINDOW_SIZE: Final[int] = 8192
 CONTEXT_WINDOW_USAGE_RATIO: Final[float] = 0.85
+SUPPORTED_NATIVE_PROVIDERS: Final[list[str]] = [
+    "openai",
+    "anthropic",
+    "claude",
+    "azure",
+    "azure_openai",
+    "google",
+    "gemini",
+    "bedrock",
+    "aws",
+]
 class Delta(TypedDict):
@@ -306,24 +317,17 @@ class LLM(BaseLLM):
         provider = model.partition("/")[0] if "/" in model else "openai"
         native_class = cls._get_native_provider(provider)
-        if native_class and not is_litellm:
+        if native_class and not is_litellm and provider in SUPPORTED_NATIVE_PROVIDERS:
             try:
                 model_string = model.partition("/")[2] if "/" in model else model
                 return native_class(model=model_string, provider=provider, **kwargs)
             except Exception as e:
-                import logging
-                logger = logging.getLogger(__name__)
-                logger.warning(
-                    f"Native SDK failed for {provider}: {e}, falling back to LiteLLM"
-                )
+                raise ImportError(f"Error importing native provider: {e}") from e
         # FALLBACK to LiteLLM
         if not LITELLM_AVAILABLE:
-            raise ImportError(
-                "Please install the required dependencies:\n"
-                "- For LiteLLM: uv add litellm"
-            )
+            logger.error("LiteLLM is not available, falling back to LiteLLM")
+            raise ImportError("Fallback to LiteLLM is not available") from None
         instance = object.__new__(cls)
         super(LLM, instance).__init__(model=model, is_litellm=True, **kwargs)
@@ -334,46 +338,31 @@ class LLM(BaseLLM):
     def _get_native_provider(cls, provider: str) -> type | None:
         """Get native provider class if available."""
         if provider == "openai":
-            try:
-                from crewai.llms.providers.openai.completion import OpenAICompletion
+            from crewai.llms.providers.openai.completion import OpenAICompletion
-                return OpenAICompletion
-            except ImportError:
-                return None
+            return OpenAICompletion
-        elif provider == "anthropic" or provider == "claude":
-            try:
-                from crewai.llms.providers.anthropic.completion import (
-                    AnthropicCompletion,
-                )
+        if provider == "anthropic" or provider == "claude":
+            from crewai.llms.providers.anthropic.completion import (
+                AnthropicCompletion,
+            )
-                return AnthropicCompletion
-            except ImportError:
-                return None
+            return AnthropicCompletion
-        elif provider == "azure" or provider == "azure_openai":
-            try:
-                from crewai.llms.providers.azure.completion import AzureCompletion
+        if provider == "azure" or provider == "azure_openai":
+            from crewai.llms.providers.azure.completion import AzureCompletion
-                return AzureCompletion
-            except ImportError:
-                return None
+            return AzureCompletion
-        elif provider == "google" or provider == "gemini":
-            try:
-                from crewai.llms.providers.gemini.completion import GeminiCompletion
+        if provider == "google" or provider == "gemini":
+            from crewai.llms.providers.gemini.completion import GeminiCompletion
-                return GeminiCompletion
-            except ImportError:
-                return None
+            return GeminiCompletion
-        elif provider == "bedrock":
-            try:
-                from crewai.llms.providers.bedrock.completion import BedrockCompletion
+        if provider == "bedrock":
+            from crewai.llms.providers.bedrock.completion import BedrockCompletion
-                return BedrockCompletion
-            except ImportError:
-                return None
+            return BedrockCompletion
         return None


@@ -16,7 +16,7 @@ try:
     from anthropic.types.tool_use_block import ToolUseBlock
 except ImportError:
     raise ImportError(
-        "Anthropic native provider not available, to install: `uv add anthropic`"
+        'Anthropic native provider not available, to install: uv add "crewai[anthropic]"'
     ) from None


@@ -23,7 +23,7 @@ try:
 except ImportError:
     raise ImportError(
-        "Azure AI Inference native provider not available, to install: `uv add azure-ai-inference`"
+        'Azure AI Inference native provider not available, to install: uv add "crewai[azure-ai-inference]"'
     ) from None


@@ -35,7 +35,7 @@ try:
     from botocore.exceptions import BotoCoreError, ClientError
 except ImportError:
     raise ImportError(
-        "AWS Bedrock native provider not available, to install: `uv add boto3`"
+        'AWS Bedrock native provider not available, to install: uv add "crewai[bedrock]"'
     ) from None


@@ -16,7 +16,7 @@ try:
     from google.genai.errors import APIError
 except ImportError:
     raise ImportError(
-        "Google Gen AI native provider not available, to install: `uv add google-genai`"
+        'Google Gen AI native provider not available, to install: uv add "crewai[google-genai]"'
     ) from None


@@ -141,9 +141,10 @@ def test_anthropic_completion_module_is_imported():
     assert hasattr(completion_mod, 'AnthropicCompletion')
-def test_fallback_to_litellm_when_native_anthropic_fails():
+def test_native_anthropic_raises_error_when_initialization_fails():
     """
-    Test that LLM falls back to LiteLLM when native Anthropic completion fails
+    Test that LLM raises ImportError when native Anthropic completion fails to initialize.
+    This ensures we don't silently fall back when there's a configuration issue.
     """
     # Mock the _get_native_provider to return a failing class
     with patch('crewai.llm.LLM._get_native_provider') as mock_get_provider:
@@ -154,12 +155,12 @@ def test_fallback_to_litellm_when_native_anthropic_fails():
         mock_get_provider.return_value = FailingCompletion
-        # This should fall back to LiteLLM
-        llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")
+        # This should raise ImportError, not fall back to LiteLLM
+        with pytest.raises(ImportError) as excinfo:
+            LLM(model="anthropic/claude-3-5-sonnet-20241022")
-        # Check that it's using LiteLLM
-        assert hasattr(llm, 'is_litellm')
-        assert llm.is_litellm == True
+        assert "Error importing native provider" in str(excinfo.value)
+        assert "Native Anthropic SDK failed" in str(excinfo.value)
 def test_anthropic_completion_initialization_parameters():


@@ -123,9 +123,10 @@ def test_azure_completion_module_is_imported():
     assert hasattr(completion_mod, 'AzureCompletion')
-def test_fallback_to_litellm_when_native_azure_fails():
+def test_native_azure_raises_error_when_initialization_fails():
     """
-    Test that LLM falls back to LiteLLM when native Azure completion fails
+    Test that LLM raises ImportError when native Azure completion fails to initialize.
+    This ensures we don't silently fall back when there's a configuration issue.
     """
     # Mock the _get_native_provider to return a failing class
     with patch('crewai.llm.LLM._get_native_provider') as mock_get_provider:
@@ -136,12 +137,12 @@ def test_fallback_to_litellm_when_native_azure_fails():
         mock_get_provider.return_value = FailingCompletion
-        # This should fall back to LiteLLM
-        llm = LLM(model="azure/gpt-4")
+        # This should raise ImportError, not fall back to LiteLLM
+        with pytest.raises(ImportError) as excinfo:
+            LLM(model="azure/gpt-4")
-        # Check that it's using LiteLLM
-        assert hasattr(llm, 'is_litellm')
-        assert llm.is_litellm == True
+        assert "Error importing native provider" in str(excinfo.value)
+        assert "Native Azure AI Inference SDK failed" in str(excinfo.value)
 def test_azure_completion_initialization_parameters():


@@ -85,9 +85,13 @@ def test_bedrock_completion_module_is_imported():
     assert hasattr(completion_mod, 'BedrockCompletion')
-def test_fallback_to_litellm_when_native_bedrock_fails():
+def test_native_bedrock_raises_error_when_initialization_fails():
     """
-    Test that LLM falls back to LiteLLM when native Bedrock completion fails
+    Test that LLM raises ImportError when native Bedrock completion fails.
+    With the new behavior, when a native provider is in SUPPORTED_NATIVE_PROVIDERS
+    but fails to instantiate, we raise an ImportError instead of silently falling back.
+    This provides clearer error messages to users about missing dependencies.
     """
     # Mock the _get_native_provider to return a failing class
     with patch('crewai.llm.LLM._get_native_provider') as mock_get_provider:
@@ -98,12 +102,13 @@ def test_fallback_to_litellm_when_native_bedrock_fails():
         mock_get_provider.return_value = FailingCompletion
-        # This should fall back to LiteLLM
-        llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
+        # This should raise ImportError with clear message
+        with pytest.raises(ImportError) as excinfo:
+            LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
-        # Check that it's using LiteLLM
-        assert hasattr(llm, 'is_litellm')
-        assert llm.is_litellm == True
+        # Verify the error message is helpful
+        assert "Error importing native provider" in str(excinfo.value)
+        assert "Native AWS Bedrock SDK failed" in str(excinfo.value)
 def test_bedrock_completion_initialization_parameters():


@@ -120,9 +120,13 @@ def test_gemini_completion_module_is_imported():
     assert hasattr(completion_mod, 'GeminiCompletion')
-def test_fallback_to_litellm_when_native_gemini_fails():
+def test_native_gemini_raises_error_when_initialization_fails():
     """
-    Test that LLM falls back to LiteLLM when native Gemini completion fails
+    Test that LLM raises ImportError when native Gemini completion fails.
+    With the new behavior, when a native provider is in SUPPORTED_NATIVE_PROVIDERS
+    but fails to instantiate, we raise an ImportError instead of silently falling back.
+    This provides clearer error messages to users about missing dependencies.
     """
    # Mock the _get_native_provider to return a failing class
     with patch('crewai.llm.LLM._get_native_provider') as mock_get_provider:
@@ -133,12 +137,13 @@ def test_fallback_to_litellm_when_native_gemini_fails():
         mock_get_provider.return_value = FailingCompletion
-        # This should fall back to LiteLLM
-        llm = LLM(model="google/gemini-2.0-flash-001")
+        # This should raise ImportError with clear message
+        with pytest.raises(ImportError) as excinfo:
+            LLM(model="google/gemini-2.0-flash-001")
-        # Check that it's using LiteLLM
-        assert hasattr(llm, 'is_litellm')
-        assert llm.is_litellm == True
+        # Verify the error message is helpful
+        assert "Error importing native provider" in str(excinfo.value)
+        assert "Native Google Gen AI SDK failed" in str(excinfo.value)
 def test_gemini_completion_initialization_parameters():


@@ -81,9 +81,10 @@ def test_openai_completion_module_is_imported():
     assert hasattr(completion_mod, 'OpenAICompletion')
-def test_fallback_to_litellm_when_native_fails():
+def test_native_openai_raises_error_when_initialization_fails():
     """
-    Test that LLM falls back to LiteLLM when native OpenAI completion fails
+    Test that LLM raises ImportError when native OpenAI completion fails to initialize.
+    This ensures we don't silently fall back when there's a configuration issue.
     """
     # Mock the _get_native_provider to return a failing class
     with patch('crewai.llm.LLM._get_native_provider') as mock_get_provider:
@@ -94,12 +95,12 @@ def test_fallback_to_litellm_when_native_fails():
         mock_get_provider.return_value = FailingCompletion
-        # This should fall back to LiteLLM
-        llm = LLM(model="openai/gpt-4o")
+        # This should raise ImportError, not fall back to LiteLLM
+        with pytest.raises(ImportError) as excinfo:
+            LLM(model="openai/gpt-4o")
-        # Check that it's using LiteLLM
-        assert hasattr(llm, 'is_litellm')
-        assert llm.is_litellm == True
+        assert "Error importing native provider" in str(excinfo.value)
+        assert "Native SDK failed" in str(excinfo.value)
 def test_openai_completion_initialization_parameters():


@@ -215,7 +215,7 @@ def test_get_custom_llm_provider_openrouter():
 def test_get_custom_llm_provider_gemini():
-    llm = LLM(model="gemini/gemini-1.5-pro")
+    llm = LLM(model="gemini/gemini-1.5-pro", is_litellm=True)
     assert llm._get_custom_llm_provider() == "gemini"
@@ -243,7 +243,7 @@ def test_validate_call_params_not_supported():
     # Patch supports_response_schema to simulate an unsupported model.
     with patch("crewai.llm.supports_response_schema", return_value=False):
-        llm = LLM(model="gemini/gemini-1.5-pro", response_format=DummyResponse)
+        llm = LLM(model="gemini/gemini-1.5-pro", response_format=DummyResponse, is_litellm=True)
         with pytest.raises(ValueError) as excinfo:
             llm._validate_call_params()
         assert "does not support response_format" in str(excinfo.value)
@@ -251,7 +251,7 @@ def test_validate_call_params_not_supported():
 def test_validate_call_params_no_response_format():
     # When no response_format is provided, no validation error should occur.
-    llm = LLM(model="gemini/gemini-1.5-pro", response_format=None)
+    llm = LLM(model="gemini/gemini-1.5-pro", response_format=None, is_litellm=True)
     llm._validate_call_params()
@@ -267,7 +267,8 @@ def test_validate_call_params_no_response_format():
     ],
 )
 def test_gemini_models(model):
-    llm = LLM(model=model)
+    # Use LiteLLM for VCR compatibility (VCR can intercept HTTP calls but not native SDK calls)
+    llm = LLM(model=model, is_litellm=True)
     result = llm.call("What is the capital of France?")
     assert isinstance(result, str)
     assert "Paris" in result
@@ -281,7 +282,8 @@ def test_gemini_models(model):
     ],
 )
 def test_gemma3(model):
-    llm = LLM(model=model)
+    # Use LiteLLM for VCR compatibility (VCR can intercept HTTP calls but not native SDK calls)
+    llm = LLM(model=model, is_litellm=True)
     result = llm.call("What is the capital of France?")
     assert isinstance(result, str)
     assert "Paris" in result
@@ -697,3 +699,29 @@ def test_ollama_does_not_modify_when_last_is_user(ollama_llm):
     formatted = ollama_llm._format_messages_for_provider(original_messages)
     assert formatted == original_messages
+def test_native_provider_raises_error_when_supported_but_fails():
+    """Test that when a native provider is in SUPPORTED_NATIVE_PROVIDERS but fails to instantiate, we raise the error."""
+    with patch("crewai.llm.SUPPORTED_NATIVE_PROVIDERS", ["openai"]):
+        with patch("crewai.llm.LLM._get_native_provider") as mock_get_native:
+            # Mock that provider exists but throws an error when instantiated
+            mock_provider = MagicMock()
+            mock_provider.side_effect = ValueError("Native provider initialization failed")
+            mock_get_native.return_value = mock_provider
+            with pytest.raises(ImportError) as excinfo:
+                LLM(model="openai/gpt-4", is_litellm=False)
+            assert "Error importing native provider" in str(excinfo.value)
+            assert "Native provider initialization failed" in str(excinfo.value)
+def test_native_provider_falls_back_to_litellm_when_not_in_supported_list():
+    """Test that when a provider is not in SUPPORTED_NATIVE_PROVIDERS, we fall back to LiteLLM."""
+    with patch("crewai.llm.SUPPORTED_NATIVE_PROVIDERS", ["openai", "anthropic"]):
+        # Using a provider not in the supported list
+        llm = LLM(model="groq/llama-3.1-70b-versatile", is_litellm=False)
+        # Should fall back to LiteLLM
+        assert llm.is_litellm is True
+        assert llm.model == "groq/llama-3.1-70b-versatile"

uv.lock (generated)

@@ -1066,13 +1066,16 @@ dependencies = [
 aisuite = [
     { name = "aisuite" },
 ]
+anthropic = [
+    { name = "anthropic" },
+]
 aws = [
     { name = "boto3" },
 ]
 azure-ai-inference = [
     { name = "azure-ai-inference" },
 ]
-boto3 = [
+bedrock = [
     { name = "boto3" },
 ]
 docling = [
@@ -1115,10 +1118,11 @@ watson = [
 [package.metadata]
 requires-dist = [
     { name = "aisuite", marker = "extra == 'aisuite'", specifier = ">=0.1.10" },
+    { name = "anthropic", marker = "extra == 'anthropic'", specifier = ">=0.69.0" },
     { name = "appdirs", specifier = ">=1.4.4" },
     { name = "azure-ai-inference", marker = "extra == 'azure-ai-inference'", specifier = ">=1.0.0b9" },
     { name = "boto3", marker = "extra == 'aws'", specifier = ">=1.40.38" },
-    { name = "boto3", marker = "extra == 'boto3'", specifier = ">=1.40.45" },
+    { name = "boto3", marker = "extra == 'bedrock'", specifier = ">=1.40.45" },
     { name = "chromadb", specifier = "~=1.1.0" },
     { name = "click", specifier = ">=8.1.7" },
     { name = "crewai-tools", marker = "extra == 'tools'", editable = "lib/crewai-tools" },
@@ -1155,7 +1159,7 @@ requires-dist = [
     { name = "uv", specifier = ">=0.4.25" },
     { name = "voyageai", marker = "extra == 'voyageai'", specifier = ">=0.3.5" },
 ]
-provides-extras = ["aisuite", "aws", "azure-ai-inference", "boto3", "docling", "embeddings", "google-genai", "litellm", "mem0", "openpyxl", "pandas", "pdfplumber", "qdrant", "tools", "voyageai", "watson"]
+provides-extras = ["aisuite", "anthropic", "aws", "azure-ai-inference", "bedrock", "docling", "embeddings", "google-genai", "litellm", "mem0", "openpyxl", "pandas", "pdfplumber", "qdrant", "tools", "voyageai", "watson"]
 [[package]]
 name = "crewai-devtools"