Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-08 15:48:29 +00:00
Add support for Llama API with LiteLLM
Co-Authored-By: Joe Moura <joao@crewai.com>
In crewai.cli.constants (the module the new test imports from):

@@ -103,6 +103,17 @@ ENV_VARS = {
             "key_name": "SAMBANOVA_API_KEY",
         }
     ],
+    "meta-llama": [
+        {
+            "prompt": "Enter your LLAMA API key (press Enter to skip)",
+            "key_name": "LLAMA_API_KEY",
+        },
+        {
+            "prompt": "Enter your LLAMA API base URL (press Enter to skip)",
+            "key_name": "LLAMA_API_BASE",
+            "default": "https://api.llama.com/compat/v1",
+        },
+    ],
 }
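For context (not part of the diff): these prompts drive the crewai CLI's provider setup, and the values entered are persisted as environment variables (typically via the project's .env file). A minimal sketch of the environment this config expects, with placeholder values:

    import os

    # Placeholder values; LLAMA_API_BASE falls back to the default shown above.
    os.environ["LLAMA_API_KEY"] = "<your-llama-api-key>"
    os.environ["LLAMA_API_BASE"] = "https://api.llama.com/compat/v1"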
@@ -119,6 +130,7 @@ PROVIDERS = [
     "azure",
     "cerebras",
     "sambanova",
+    "meta-llama",
 ]
 
 MODELS = {
@@ -314,6 +326,12 @@ MODELS = {
         "sambanova/Meta-Llama-3.2-3B-Instruct",
         "sambanova/Meta-Llama-3.2-1B-Instruct",
     ],
+    "meta-llama": [
+        "meta-llama/llama-4-scout-17b-16e-instruct-fp8",
+        "meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
+        "meta-llama/llama-3.3-70b-instruct",
+        "meta-llama/llama-3.3-8b-instruct",
+    ],
 }
 
 DEFAULT_LLM_MODEL = "gpt-4o-mini"
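With the provider registered and its models listed, a Llama API model can be selected like any other. A hedged usage sketch, assuming the standard crewai.LLM entry point and that the model string is forwarded to LiteLLM unchanged:

    import os

    from crewai import LLM

    # Pass credentials explicitly, or rely on the LLAMA_API_* variables above.
    llm = LLM(
        model="meta-llama/llama-3.3-70b-instruct",
        api_key=os.environ.get("LLAMA_API_KEY"),
        base_url=os.environ.get("LLAMA_API_BASE", "https://api.llama.com/compat/v1"),
    )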
@@ -321,4 +339,4 @@ DEFAULT_LLM_MODEL = "gpt-4o-mini"
 JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
 
 
-LITELLM_PARAMS = ["api_key", "api_base", "api_version"]
+LITELLM_PARAMS = ["api_key", "api_base", "api_version", "llama_api_key", "llama_api_base"]
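LITELLM_PARAMS names the keyword arguments crewAI forwards to LiteLLM, so the equivalent direct call looks roughly like this (a sketch, assuming LiteLLM resolves the model against the OpenAI-compatible endpoint supplied via api_base):

    import os

    import litellm

    response = litellm.completion(
        model="meta-llama/llama-3.3-70b-instruct",
        messages=[{"role": "user", "content": "Say hello."}],
        api_key=os.environ["LLAMA_API_KEY"],
        api_base=os.environ["LLAMA_API_BASE"],
    )
    print(response.choices[0].message.content)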
In crewai.llm (the LLM_CONTEXT_WINDOW_SIZES table):

@@ -120,6 +120,9 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "mixtral-8x7b-32768": 32768,
     "llama-3.3-70b-versatile": 128000,
     "llama-3.3-70b-instruct": 128000,
+    "llama-4-scout-17b-16e-instruct-fp8": 128000,
+    "llama-4-maverick-17b-128e-instruct-fp8": 128000,
+    "llama-3.3-8b-instruct": 128000,
     # sambanova
     "Meta-Llama-3.3-70B-Instruct": 131072,
     "QwQ-32B-Preview": 8192,
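The test added below verifies these entries with a two-direction substring match between a model's trailing name and the table keys. A hypothetical helper (context_window_for is not part of the codebase) illustrating that check:

    from typing import Optional

    from crewai.llm import LLM_CONTEXT_WINDOW_SIZES

    def context_window_for(model: str) -> Optional[int]:
        # Compare the trailing model name against known keys in both directions.
        name = model.split("/")[-1]
        for key, size in LLM_CONTEXT_WINDOW_SIZES.items():
            if name in key or key in name:
                return size
        return None

    print(context_window_for("meta-llama/llama-4-scout-17b-16e-instruct-fp8"))  # 128000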
And in the LLM test suite:

@@ -618,3 +618,26 @@ def test_handle_streaming_tool_calls_no_tools(mock_emit):
         expected_completed_llm_call=1,
         expected_final_chunk_result=response,
     )
+
+
+def test_llama_api_support():
+    """Test that Llama API models are correctly configured."""
+    from crewai.cli.constants import MODELS, PROVIDERS, ENV_VARS, LITELLM_PARAMS
+    from crewai.llm import LLM_CONTEXT_WINDOW_SIZES
+
+    assert "meta-llama" in PROVIDERS
+
+    assert "meta-llama" in MODELS
+    assert len(MODELS["meta-llama"]) > 0
+
+    assert "meta-llama" in ENV_VARS
+    assert any(env_var["key_name"] == "LLAMA_API_KEY" for env_var in ENV_VARS["meta-llama"])
+    assert any(env_var["key_name"] == "LLAMA_API_BASE" for env_var in ENV_VARS["meta-llama"])
+
+    assert "llama_api_key" in LITELLM_PARAMS
+    assert "llama_api_base" in LITELLM_PARAMS
+
+    for model in MODELS["meta-llama"]:
+        model_name = model.split("/")[-1]
+        assert any(model_name in key or key in model_name
+                   for key in LLM_CONTEXT_WINDOW_SIZES.keys()), f"Context window size for {model} not found"
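Note that the test only imports module-level constants, so it needs no network access or API keys and can be selected on its own with pytest -k test_llama_api_support.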