fix: route llm model syntax to litellm

* fix: route llm model syntax to litellm

* wip: add list of supported models
Greyson LaLonde authored 2025-11-07 13:34:15 -05:00, committed by GitHub
parent d29867bbb6, commit 1ed307b58c
8 changed files with 802 additions and 23 deletions
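
Taken together, the new routing behaves like this from the caller's side (a sketch grounded in the tests further down; it assumes the usual `from crewai import LLM` import and that the relevant API keys are set):

from crewai import LLM

# Native prefix + model present in the provider's constants -> native SDK
llm = LLM(model="openai/gpt-4o")
assert llm.is_litellm is False and llm.provider == "openai"

# Native prefix + model NOT in the constants -> LiteLLM, full string preserved
llm = LLM(model="openai/gpt-future-6")
assert llm.is_litellm is True and llm.model == "openai/gpt-future-6"

# Non-native prefix -> always LiteLLM
llm = LLM(model="groq/llama-3.3-70b")
assert llm.is_litellm is True

# No prefix -> provider inferred from the constants
llm = LLM(model="claude-opus-4-0")
assert llm.provider == "anthropic"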


@@ -38,6 +38,13 @@ from crewai.events.types.tool_usage_events import (
ToolUsageStartedEvent,
)
from crewai.llms.base_llm import BaseLLM
from crewai.llms.constants import (
ANTHROPIC_MODELS,
AZURE_MODELS,
BEDROCK_MODELS,
GEMINI_MODELS,
OPENAI_MODELS,
)
from crewai.utilities import InternalInstructor
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -323,18 +330,64 @@ class LLM(BaseLLM):
completion_cost: float | None = None
def __new__(cls, model: str, is_litellm: bool = False, **kwargs: Any) -> LLM:
"""Factory method that routes to native SDK or falls back to LiteLLM."""
"""Factory method that routes to native SDK or falls back to LiteLLM.
Routing priority:
1. If 'provider' kwarg is present, use that provider with constants
2. If only 'model' kwarg, use constants to infer provider
3. If "/" in model name:
- Check if prefix is a native provider (openai/anthropic/azure/bedrock/gemini)
- If yes, validate model against constants
- If valid, route to native SDK; otherwise route to LiteLLM
"""
if not model or not isinstance(model, str):
raise ValueError("Model must be a non-empty string")
-provider = model.partition("/")[0] if "/" in model else "openai"
explicit_provider = kwargs.get("provider")
-native_class = cls._get_native_provider(provider)
if explicit_provider:
provider = explicit_provider
use_native = True
model_string = model
elif "/" in model:
prefix, _, model_part = model.partition("/")
provider_mapping = {
"openai": "openai",
"anthropic": "anthropic",
"claude": "anthropic",
"azure": "azure",
"azure_openai": "azure",
"google": "gemini",
"gemini": "gemini",
"bedrock": "bedrock",
"aws": "bedrock",
}
canonical_provider = provider_mapping.get(prefix.lower())
if canonical_provider and cls._validate_model_in_constants(
model_part, canonical_provider
):
provider = canonical_provider
use_native = True
model_string = model_part
else:
provider = prefix
use_native = False
model_string = model_part
else:
provider = cls._infer_provider_from_model(model)
use_native = True
model_string = model
native_class = cls._get_native_provider(provider) if use_native else None
if native_class and not is_litellm and provider in SUPPORTED_NATIVE_PROVIDERS:
try:
-model_string = model.partition("/")[2] if "/" in model else model
# Remove 'provider' from kwargs if present to avoid passing a duplicate keyword argument
kwargs_copy = {k: v for k, v in kwargs.items() if k != 'provider'}
return cast(
-Self, native_class(model=model_string, provider=provider, **kwargs)
Self, native_class(model=model_string, provider=provider, **kwargs_copy)
)
except NotImplementedError:
raise
@@ -351,6 +404,63 @@ class LLM(BaseLLM):
instance.is_litellm = True
return instance
@classmethod
def _validate_model_in_constants(cls, model: str, provider: str) -> bool:
"""Validate if a model name exists in the provider's constants.
Args:
model: The model name to validate
provider: The provider to check against (canonical name)
Returns:
True if the model exists in the provider's constants, False otherwise
"""
if provider == "openai":
return model in OPENAI_MODELS
if provider == "anthropic" or provider == "claude":
return model in ANTHROPIC_MODELS
if provider == "gemini":
return model in GEMINI_MODELS
if provider == "bedrock":
return model in BEDROCK_MODELS
if provider == "azure":
# Azure deployments use user-chosen names, so there is no fixed model list to validate against; accept any model for now (TODO: find a better approach)
return True
return False
@classmethod
def _infer_provider_from_model(cls, model: str) -> str:
"""Infer the provider from the model name.
Args:
model: The model name without provider prefix
Returns:
The inferred provider name; defaults to "openai" for unrecognized models
"""
if model in OPENAI_MODELS:
return "openai"
if model in ANTHROPIC_MODELS:
return "anthropic"
if model in GEMINI_MODELS:
return "gemini"
if model in BEDROCK_MODELS:
return "bedrock"
if model in AZURE_MODELS:
return "azure"
return "openai"
@classmethod
def _get_native_provider(cls, provider: str) -> type | None:
"""Get native provider class if available."""


@@ -0,0 +1,558 @@
from typing import Literal, TypeAlias
OpenAIModels: TypeAlias = Literal[
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-instruct-0914",
"gpt-4",
"gpt-4-0125-preview",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
"gpt-4-vision-preview",
"gpt-4.1",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano",
"gpt-4.1-nano-2025-04-14",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-audio-preview",
"gpt-4o-audio-preview-2024-10-01",
"gpt-4o-audio-preview-2024-12-17",
"gpt-4o-audio-preview-2025-06-03",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4o-mini-audio-preview",
"gpt-4o-mini-audio-preview-2024-12-17",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-4o-mini-search-preview",
"gpt-4o-mini-search-preview-2025-03-11",
"gpt-4o-mini-transcribe",
"gpt-4o-mini-tts",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-search-preview",
"gpt-4o-search-preview-2025-03-11",
"gpt-4o-transcribe",
"gpt-4o-transcribe-diarize",
"gpt-5",
"gpt-5-2025-08-07",
"gpt-5-chat",
"gpt-5-chat-latest",
"gpt-5-codex",
"gpt-5-mini",
"gpt-5-mini-2025-08-07",
"gpt-5-nano",
"gpt-5-nano-2025-08-07",
"gpt-5-pro",
"gpt-5-pro-2025-10-06",
"gpt-5-search-api",
"gpt-5-search-api-2025-10-14",
"gpt-audio",
"gpt-audio-2025-08-28",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-image-1",
"gpt-image-1-mini",
"gpt-realtime",
"gpt-realtime-2025-08-28",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"o1",
"o1-preview",
"o1-2024-12-17",
"o1-mini",
"o1-mini-2024-09-12",
"o1-pro",
"o1-pro-2025-03-19",
"o3-mini",
"o3",
"o4-mini",
"whisper-1",
]
OPENAI_MODELS: list[OpenAIModels] = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-instruct-0914",
"gpt-4",
"gpt-4-0125-preview",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
"gpt-4-vision-preview",
"gpt-4.1",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano",
"gpt-4.1-nano-2025-04-14",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-audio-preview",
"gpt-4o-audio-preview-2024-10-01",
"gpt-4o-audio-preview-2024-12-17",
"gpt-4o-audio-preview-2025-06-03",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4o-mini-audio-preview",
"gpt-4o-mini-audio-preview-2024-12-17",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-4o-mini-search-preview",
"gpt-4o-mini-search-preview-2025-03-11",
"gpt-4o-mini-transcribe",
"gpt-4o-mini-tts",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-search-preview",
"gpt-4o-search-preview-2025-03-11",
"gpt-4o-transcribe",
"gpt-4o-transcribe-diarize",
"gpt-5",
"gpt-5-2025-08-07",
"gpt-5-chat",
"gpt-5-chat-latest",
"gpt-5-codex",
"gpt-5-mini",
"gpt-5-mini-2025-08-07",
"gpt-5-nano",
"gpt-5-nano-2025-08-07",
"gpt-5-pro",
"gpt-5-pro-2025-10-06",
"gpt-5-search-api",
"gpt-5-search-api-2025-10-14",
"gpt-audio",
"gpt-audio-2025-08-28",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-image-1",
"gpt-image-1-mini",
"gpt-realtime",
"gpt-realtime-2025-08-28",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"o1",
"o1-preview",
"o1-2024-12-17",
"o1-mini",
"o1-mini-2024-09-12",
"o1-pro",
"o1-pro-2025-03-19",
"o3-mini",
"o3",
"o4-mini",
"whisper-1",
]
AnthropicModels: TypeAlias = Literal[
"claude-3-7-sonnet-latest",
"claude-3-7-sonnet-20250219",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-haiku-4-5",
"claude-haiku-4-5-20251001",
"claude-sonnet-4-20250514",
"claude-sonnet-4-0",
"claude-4-sonnet-20250514",
"claude-sonnet-4-5",
"claude-sonnet-4-5-20250929",
"claude-3-5-sonnet-latest",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-20240620",
"claude-opus-4-0",
"claude-opus-4-20250514",
"claude-4-opus-20250514",
"claude-opus-4-1",
"claude-opus-4-1-20250805",
"claude-3-opus-latest",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-latest",
"claude-3-haiku-20240307",
]
ANTHROPIC_MODELS: list[AnthropicModels] = [
"claude-3-7-sonnet-latest",
"claude-3-7-sonnet-20250219",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-haiku-4-5",
"claude-haiku-4-5-20251001",
"claude-sonnet-4-20250514",
"claude-sonnet-4-0",
"claude-4-sonnet-20250514",
"claude-sonnet-4-5",
"claude-sonnet-4-5-20250929",
"claude-3-5-sonnet-latest",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-20240620",
"claude-opus-4-0",
"claude-opus-4-20250514",
"claude-4-opus-20250514",
"claude-opus-4-1",
"claude-opus-4-1-20250805",
"claude-3-opus-latest",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-latest",
"claude-3-haiku-20240307",
]
GeminiModels: TypeAlias = Literal[
"gemini-2.5-pro",
"gemini-2.5-pro-preview-03-25",
"gemini-2.5-pro-preview-05-06",
"gemini-2.5-pro-preview-06-05",
"gemini-2.5-flash",
"gemini-2.5-flash-preview-05-20",
"gemini-2.5-flash-preview-04-17",
"gemini-2.5-flash-image",
"gemini-2.5-flash-image-preview",
"gemini-2.5-flash-lite",
"gemini-2.5-flash-lite-preview-06-17",
"gemini-2.5-flash-preview-09-2025",
"gemini-2.5-flash-lite-preview-09-2025",
"gemini-2.5-flash-preview-tts",
"gemini-2.5-pro-preview-tts",
"gemini-2.5-computer-use-preview-10-2025",
"gemini-2.0-flash",
"gemini-2.0-flash-001",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-exp-image-generation",
"gemini-2.0-flash-lite",
"gemini-2.0-flash-lite-001",
"gemini-2.0-flash-lite-preview",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-2.0-flash-preview-image-generation",
"gemini-2.0-flash-thinking-exp",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-flash-thinking-exp-1219",
"gemini-2.0-pro-exp",
"gemini-2.0-pro-exp-02-05",
"gemini-exp-1206",
"gemini-1.5-pro",
"gemini-1.5-flash",
"gemini-1.5-flash-8b",
"gemini-flash-latest",
"gemini-flash-lite-latest",
"gemini-pro-latest",
"gemini-2.0-flash-live-001",
"gemini-live-2.5-flash-preview",
"gemini-2.5-flash-live-preview",
"gemini-robotics-er-1.5-preview",
"gemini-gemma-2-27b-it",
"gemini-gemma-2-9b-it",
"gemma-3-1b-it",
"gemma-3-4b-it",
"gemma-3-12b-it",
"gemma-3-27b-it",
"gemma-3n-e2b-it",
"gemma-3n-e4b-it",
"learnlm-2.0-flash-experimental",
]
GEMINI_MODELS: list[GeminiModels] = [
"gemini-2.5-pro",
"gemini-2.5-pro-preview-03-25",
"gemini-2.5-pro-preview-05-06",
"gemini-2.5-pro-preview-06-05",
"gemini-2.5-flash",
"gemini-2.5-flash-preview-05-20",
"gemini-2.5-flash-preview-04-17",
"gemini-2.5-flash-image",
"gemini-2.5-flash-image-preview",
"gemini-2.5-flash-lite",
"gemini-2.5-flash-lite-preview-06-17",
"gemini-2.5-flash-preview-09-2025",
"gemini-2.5-flash-lite-preview-09-2025",
"gemini-2.5-flash-preview-tts",
"gemini-2.5-pro-preview-tts",
"gemini-2.5-computer-use-preview-10-2025",
"gemini-2.0-flash",
"gemini-2.0-flash-001",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-exp-image-generation",
"gemini-2.0-flash-lite",
"gemini-2.0-flash-lite-001",
"gemini-2.0-flash-lite-preview",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-2.0-flash-preview-image-generation",
"gemini-2.0-flash-thinking-exp",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-flash-thinking-exp-1219",
"gemini-2.0-pro-exp",
"gemini-2.0-pro-exp-02-05",
"gemini-exp-1206",
"gemini-1.5-pro",
"gemini-1.5-flash",
"gemini-1.5-flash-8b",
"gemini-flash-latest",
"gemini-flash-lite-latest",
"gemini-pro-latest",
"gemini-2.0-flash-live-001",
"gemini-live-2.5-flash-preview",
"gemini-2.5-flash-live-preview",
"gemini-robotics-er-1.5-preview",
"gemini-gemma-2-27b-it",
"gemini-gemma-2-9b-it",
"gemma-3-1b-it",
"gemma-3-4b-it",
"gemma-3-12b-it",
"gemma-3-27b-it",
"gemma-3n-e2b-it",
"gemma-3n-e4b-it",
"learnlm-2.0-flash-experimental",
]
AzureModels: TypeAlias = Literal[
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-35-turbo",
"gpt-35-turbo-0125",
"gpt-35-turbo-1106",
"gpt-35-turbo-16k-0613",
"gpt-35-turbo-instruct-0914",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-0125-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-vision",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-mini",
"gpt-5",
"o1",
"o1-mini",
"o1-preview",
"o3-mini",
"o3",
"o4-mini",
]
AZURE_MODELS: list[AzureModels] = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-35-turbo",
"gpt-35-turbo-0125",
"gpt-35-turbo-1106",
"gpt-35-turbo-16k-0613",
"gpt-35-turbo-instruct-0914",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-0125-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-vision",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-mini",
"gpt-5",
"o1",
"o1-mini",
"o1-preview",
"o3-mini",
"o3",
"o4-mini",
]
BedrockModels: TypeAlias = Literal[
"ai21.jamba-1-5-large-v1:0",
"ai21.jamba-1-5-mini-v1:0",
"amazon.nova-lite-v1:0",
"amazon.nova-lite-v1:0:24k",
"amazon.nova-lite-v1:0:300k",
"amazon.nova-micro-v1:0",
"amazon.nova-micro-v1:0:128k",
"amazon.nova-micro-v1:0:24k",
"amazon.nova-premier-v1:0",
"amazon.nova-premier-v1:0:1000k",
"amazon.nova-premier-v1:0:20k",
"amazon.nova-premier-v1:0:8k",
"amazon.nova-premier-v1:0:mm",
"amazon.nova-pro-v1:0",
"amazon.nova-pro-v1:0:24k",
"amazon.nova-pro-v1:0:300k",
"amazon.titan-text-express-v1",
"amazon.titan-text-express-v1:0:8k",
"amazon.titan-text-lite-v1",
"amazon.titan-text-lite-v1:0:4k",
"amazon.titan-tg1-large",
"anthropic.claude-3-5-haiku-20241022-v1:0",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
"anthropic.claude-3-5-sonnet-20241022-v2:0",
"anthropic.claude-3-7-sonnet-20250219-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0:200k",
"anthropic.claude-3-haiku-20240307-v1:0:48k",
"anthropic.claude-3-opus-20240229-v1:0",
"anthropic.claude-3-opus-20240229-v1:0:12k",
"anthropic.claude-3-opus-20240229-v1:0:200k",
"anthropic.claude-3-opus-20240229-v1:0:28k",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0:200k",
"anthropic.claude-3-sonnet-20240229-v1:0:28k",
"anthropic.claude-haiku-4-5-20251001-v1:0",
"anthropic.claude-instant-v1:2:100k",
"anthropic.claude-opus-4-1-20250805-v1:0",
"anthropic.claude-opus-4-20250514-v1:0",
"anthropic.claude-sonnet-4-20250514-v1:0",
"anthropic.claude-sonnet-4-5-20250929-v1:0",
"anthropic.claude-v2:0:100k",
"anthropic.claude-v2:0:18k",
"anthropic.claude-v2:1:18k",
"anthropic.claude-v2:1:200k",
"cohere.command-r-plus-v1:0",
"cohere.command-r-v1:0",
"cohere.rerank-v3-5:0",
"deepseek.r1-v1:0",
"meta.llama3-1-70b-instruct-v1:0",
"meta.llama3-1-8b-instruct-v1:0",
"meta.llama3-2-11b-instruct-v1:0",
"meta.llama3-2-1b-instruct-v1:0",
"meta.llama3-2-3b-instruct-v1:0",
"meta.llama3-2-90b-instruct-v1:0",
"meta.llama3-3-70b-instruct-v1:0",
"meta.llama3-70b-instruct-v1:0",
"meta.llama3-8b-instruct-v1:0",
"meta.llama4-maverick-17b-instruct-v1:0",
"meta.llama4-scout-17b-instruct-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-small-2402-v1:0",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.pixtral-large-2502-v1:0",
"openai.gpt-oss-120b-1:0",
"openai.gpt-oss-20b-1:0",
"qwen.qwen3-32b-v1:0",
"qwen.qwen3-coder-30b-a3b-v1:0",
"twelvelabs.pegasus-1-2-v1:0",
]
BEDROCK_MODELS: list[BedrockModels] = [
"ai21.jamba-1-5-large-v1:0",
"ai21.jamba-1-5-mini-v1:0",
"amazon.nova-lite-v1:0",
"amazon.nova-lite-v1:0:24k",
"amazon.nova-lite-v1:0:300k",
"amazon.nova-micro-v1:0",
"amazon.nova-micro-v1:0:128k",
"amazon.nova-micro-v1:0:24k",
"amazon.nova-premier-v1:0",
"amazon.nova-premier-v1:0:1000k",
"amazon.nova-premier-v1:0:20k",
"amazon.nova-premier-v1:0:8k",
"amazon.nova-premier-v1:0:mm",
"amazon.nova-pro-v1:0",
"amazon.nova-pro-v1:0:24k",
"amazon.nova-pro-v1:0:300k",
"amazon.titan-text-express-v1",
"amazon.titan-text-express-v1:0:8k",
"amazon.titan-text-lite-v1",
"amazon.titan-text-lite-v1:0:4k",
"amazon.titan-tg1-large",
"anthropic.claude-3-5-haiku-20241022-v1:0",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
"anthropic.claude-3-5-sonnet-20241022-v2:0",
"anthropic.claude-3-7-sonnet-20250219-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0:200k",
"anthropic.claude-3-haiku-20240307-v1:0:48k",
"anthropic.claude-3-opus-20240229-v1:0",
"anthropic.claude-3-opus-20240229-v1:0:12k",
"anthropic.claude-3-opus-20240229-v1:0:200k",
"anthropic.claude-3-opus-20240229-v1:0:28k",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0:200k",
"anthropic.claude-3-sonnet-20240229-v1:0:28k",
"anthropic.claude-haiku-4-5-20251001-v1:0",
"anthropic.claude-instant-v1:2:100k",
"anthropic.claude-opus-4-1-20250805-v1:0",
"anthropic.claude-opus-4-20250514-v1:0",
"anthropic.claude-sonnet-4-20250514-v1:0",
"anthropic.claude-sonnet-4-5-20250929-v1:0",
"anthropic.claude-v2:0:100k",
"anthropic.claude-v2:0:18k",
"anthropic.claude-v2:1:18k",
"anthropic.claude-v2:1:200k",
"cohere.command-r-plus-v1:0",
"cohere.command-r-v1:0",
"cohere.rerank-v3-5:0",
"deepseek.r1-v1:0",
"meta.llama3-1-70b-instruct-v1:0",
"meta.llama3-1-8b-instruct-v1:0",
"meta.llama3-2-11b-instruct-v1:0",
"meta.llama3-2-1b-instruct-v1:0",
"meta.llama3-2-3b-instruct-v1:0",
"meta.llama3-2-90b-instruct-v1:0",
"meta.llama3-3-70b-instruct-v1:0",
"meta.llama3-70b-instruct-v1:0",
"meta.llama3-8b-instruct-v1:0",
"meta.llama4-maverick-17b-instruct-v1:0",
"meta.llama4-scout-17b-instruct-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-small-2402-v1:0",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.pixtral-large-2502-v1:0",
"openai.gpt-oss-120b-1:0",
"openai.gpt-oss-20b-1:0",
"qwen.qwen3-32b-v1:0",
"qwen.qwen3-coder-30b-a3b-v1:0",
"twelvelabs.pegasus-1-2-v1:0",
]
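
Each provider's models are declared twice above: once as a Literal alias for type checking and once as a runtime list. If keeping the two in sync by hand proves error-prone, the list can be derived from the alias instead (a possible alternative, not what this commit does):

from typing import Literal, TypeAlias, get_args

OpenAIModels: TypeAlias = Literal["gpt-4o", "gpt-4o-mini", "o3"]  # abridged
# get_args() returns the Literal's members, so the list can never drift:
OPENAI_MODELS: list[OpenAIModels] = list(get_args(OpenAIModels))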


@@ -36,7 +36,7 @@ def test_anthropic_completion_is_used_when_claude_provider():
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
assert isinstance(llm, AnthropicCompletion)
assert llm.provider == "claude"
assert llm.provider == "anthropic"
assert llm.model == "claude-3-5-sonnet-20241022"


@@ -39,7 +39,7 @@ def test_azure_completion_is_used_when_azure_openai_provider():
from crewai.llms.providers.azure.completion import AzureCompletion
assert isinstance(llm, AzureCompletion)
assert llm.provider == "azure_openai"
assert llm.provider == "azure"
assert llm.model == "gpt-4"


@@ -24,7 +24,7 @@ def test_gemini_completion_is_used_when_google_provider():
llm = LLM(model="google/gemini-2.0-flash-001")
assert llm.__class__.__name__ == "GeminiCompletion"
assert llm.provider == "google"
assert llm.provider == "gemini"
assert llm.model == "gemini-2.0-flash-001"


@@ -154,7 +154,7 @@ class TestGeminiProviderInterceptor:
# Gemini provider should raise NotImplementedError
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="gemini/gemini-pro",
model="gemini/gemini-2.5-pro",
interceptor=interceptor,
api_key="test-gemini-key",
)
@@ -169,7 +169,7 @@ class TestGeminiProviderInterceptor:
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="gemini/gemini-pro",
model="gemini/gemini-2.5-pro",
interceptor=interceptor,
api_key="test-gemini-key",
)
@@ -181,7 +181,7 @@ class TestGeminiProviderInterceptor:
def test_gemini_without_interceptor_works(self) -> None:
"""Test that Gemini LLM works without interceptor."""
llm = LLM(
model="gemini/gemini-pro",
model="gemini/gemini-2.5-pro",
api_key="test-gemini-key",
)
@@ -231,7 +231,7 @@ class TestUnsupportedProviderMessages:
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="gemini/gemini-pro",
model="gemini/gemini-2.5-pro",
interceptor=interceptor,
api_key="test-gemini-key",
)
@@ -282,7 +282,7 @@ class TestProviderSupportMatrix:
# Gemini - NOT SUPPORTED
with pytest.raises(NotImplementedError):
LLM(
model="gemini/gemini-pro",
model="gemini/gemini-2.5-pro",
interceptor=interceptor,
api_key="test",
)
@@ -315,5 +315,5 @@ class TestProviderSupportMatrix:
assert not hasattr(bedrock_llm, 'interceptor') or bedrock_llm.interceptor is None
# Gemini - doesn't have interceptor attribute
gemini_llm = LLM(model="gemini/gemini-pro", api_key="test")
assert not hasattr(gemini_llm, 'interceptor') or gemini_llm.interceptor is None
gemini_llm = LLM(model="gemini/gemini-2.5-pro", api_key="test")
assert not hasattr(gemini_llm, 'interceptor') or gemini_llm.interceptor is None


@@ -16,7 +16,7 @@ def test_openai_completion_is_used_when_openai_provider():
"""
Test that OpenAICompletion from completion.py is used when LLM uses provider 'openai'
"""
llm = LLM(model="openai/gpt-4o")
llm = LLM(model="gpt-4o")
assert llm.__class__.__name__ == "OpenAICompletion"
assert llm.provider == "openai"
@@ -70,7 +70,7 @@ def test_openai_completion_module_is_imported():
del sys.modules[module_name]
# Create LLM instance - this should trigger the import
LLM(model="openai/gpt-4o")
LLM(model="gpt-4o")
# Verify the module was imported
assert module_name in sys.modules
@@ -97,7 +97,7 @@ def test_native_openai_raises_error_when_initialization_fails():
# This should raise ImportError, not fall back to LiteLLM
with pytest.raises(ImportError) as excinfo:
LLM(model="openai/gpt-4o")
LLM(model="gpt-4o")
assert "Error importing native provider" in str(excinfo.value)
assert "Native SDK failed" in str(excinfo.value)
@@ -108,7 +108,7 @@ def test_openai_completion_initialization_parameters():
Test that OpenAICompletion is initialized with correct parameters
"""
llm = LLM(
model="openai/gpt-4o",
model="gpt-4o",
temperature=0.7,
max_tokens=1000,
api_key="test-key"
@@ -311,7 +311,7 @@ def test_openai_completion_call_returns_usage_metrics():
role="Research Assistant",
goal="Find information about the population of Tokyo",
backstory="You are a helpful research assistant.",
llm=LLM(model="openai/gpt-4o"),
llm=LLM(model="gpt-4o"),
verbose=True,
)
@@ -331,6 +331,7 @@ def test_openai_completion_call_returns_usage_metrics():
assert result.token_usage.cached_prompt_tokens == 0
@pytest.mark.skip(reason="Unsupported model names now fall back to LiteLLM instead of raising")
def test_openai_raises_error_when_model_not_supported():
"""Test that OpenAICompletion raises ValueError when model not supported"""
@@ -354,7 +355,7 @@ def test_openai_client_setup_with_extra_arguments():
Test that OpenAICompletion is initialized with correct parameters
"""
llm = LLM(
model="openai/gpt-4o",
model="gpt-4o",
temperature=0.7,
max_tokens=1000,
top_p=0.5,
@@ -391,7 +392,7 @@ def test_extra_arguments_are_passed_to_openai_completion():
"""
Test that extra arguments are passed to OpenAICompletion
"""
llm = LLM(model="openai/gpt-4o", temperature=0.7, max_tokens=1000, top_p=0.5, max_retries=3)
llm = LLM(model="gpt-4o", temperature=0.7, max_tokens=1000, top_p=0.5, max_retries=3)
with patch.object(llm.client.chat.completions, 'create') as mock_create:
mock_create.return_value = MagicMock(


@@ -710,7 +710,7 @@ def test_native_provider_raises_error_when_supported_but_fails():
mock_get_native.return_value = mock_provider
with pytest.raises(ImportError) as excinfo:
LLM(model="openai/gpt-4", is_litellm=False)
LLM(model="gpt-4", is_litellm=False)
assert "Error importing native provider" in str(excinfo.value)
assert "Native provider initialization failed" in str(excinfo.value)
@@ -725,3 +725,113 @@ def test_native_provider_falls_back_to_litellm_when_not_in_supported_list():
# Should fall back to LiteLLM
assert llm.is_litellm is True
assert llm.model == "groq/llama-3.1-70b-versatile"
def test_prefixed_models_with_valid_constants_use_native_sdk():
"""Test that models with native provider prefixes use native SDK when model is in constants."""
# Test openai/ prefix with actual OpenAI model in constants → Native SDK
with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
llm = LLM(model="openai/gpt-4o", is_litellm=False)
assert llm.is_litellm is False
assert llm.provider == "openai"
# Test anthropic/ prefix with Claude model in constants → Native SDK
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
llm2 = LLM(model="anthropic/claude-opus-4-0", is_litellm=False)
assert llm2.is_litellm is False
assert llm2.provider == "anthropic"
# Test gemini/ prefix with Gemini model in constants → Native SDK
with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
llm3 = LLM(model="gemini/gemini-2.5-pro", is_litellm=False)
assert llm3.is_litellm is False
assert llm3.provider == "gemini"
def test_prefixed_models_with_invalid_constants_use_litellm():
"""Test that models with native provider prefixes use LiteLLM when model is NOT in constants."""
# Test openai/ prefix with non-OpenAI model (not in OPENAI_MODELS) → LiteLLM
llm = LLM(model="openai/gemini-2.5-flash", is_litellm=False)
assert llm.is_litellm is True
assert llm.model == "openai/gemini-2.5-flash"
# Test openai/ prefix with unknown future model → LiteLLM
llm2 = LLM(model="openai/gpt-future-6", is_litellm=False)
assert llm2.is_litellm is True
assert llm2.model == "openai/gpt-future-6"
# Test anthropic/ prefix with non-Anthropic model → LiteLLM
llm3 = LLM(model="anthropic/gpt-4o", is_litellm=False)
assert llm3.is_litellm is True
assert llm3.model == "anthropic/gpt-4o"
def test_prefixed_models_with_non_native_providers_use_litellm():
"""Test that models with non-native provider prefixes always use LiteLLM."""
# Test groq/ prefix (not a native provider) → LiteLLM
llm = LLM(model="groq/llama-3.3-70b", is_litellm=False)
assert llm.is_litellm is True
assert llm.model == "groq/llama-3.3-70b"
# Test together/ prefix (not a native provider) → LiteLLM
llm2 = LLM(model="together/qwen-2.5-72b", is_litellm=False)
assert llm2.is_litellm is True
assert llm2.model == "together/qwen-2.5-72b"
def test_unprefixed_models_use_native_sdk():
"""Test that unprefixed models use native SDK when model is in constants."""
# gpt-4o is in OPENAI_MODELS → Native OpenAI SDK
with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
llm = LLM(model="gpt-4o", is_litellm=False)
assert llm.is_litellm is False
assert llm.provider == "openai"
# claude-opus-4-0 is in ANTHROPIC_MODELS → Native Anthropic SDK
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
llm2 = LLM(model="claude-opus-4-0", is_litellm=False)
assert llm2.is_litellm is False
assert llm2.provider == "anthropic"
# gemini-2.5-pro is in GEMINI_MODELS → Native Gemini SDK
with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
llm3 = LLM(model="gemini-2.5-pro", is_litellm=False)
assert llm3.is_litellm is False
assert llm3.provider == "gemini"
def test_explicit_provider_kwarg_takes_priority():
"""Test that explicit provider kwarg takes priority over model name inference."""
# Explicit provider=openai should use OpenAI even if model name suggests otherwise
with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
llm = LLM(model="gpt-4o", provider="openai", is_litellm=False)
assert llm.is_litellm is False
assert llm.provider == "openai"
# Explicit provider for a model with "/" should still use that provider
with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
llm2 = LLM(model="gpt-4o", provider="openai", is_litellm=False)
assert llm2.is_litellm is False
assert llm2.provider == "openai"
def test_validate_model_in_constants():
"""Test the _validate_model_in_constants method."""
# OpenAI models
assert LLM._validate_model_in_constants("gpt-4o", "openai") is True
assert LLM._validate_model_in_constants("gpt-future-6", "openai") is False
# Anthropic models
assert LLM._validate_model_in_constants("claude-opus-4-0", "claude") is True
assert LLM._validate_model_in_constants("claude-future-5", "claude") is False
# Gemini models
assert LLM._validate_model_in_constants("gemini-2.5-pro", "gemini") is True
assert LLM._validate_model_in_constants("gemini-future", "gemini") is False
# Azure models
assert LLM._validate_model_in_constants("gpt-4o", "azure") is True
assert LLM._validate_model_in_constants("gpt-35-turbo", "azure") is True
# Bedrock models
assert LLM._validate_model_in_constants("anthropic.claude-opus-4-1-20250805-v1:0", "bedrock") is True
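
One caveat when extending these assertions: there is no negative case for Azure, and there cannot be one, because _validate_model_in_constants is intentionally permissive there (Azure deployments carry user-chosen names rather than a fixed model list):

# Any name validates against "azure" under the current implementation:
assert LLM._validate_model_in_constants("my-custom-deployment", "azure") is True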