Compare commits

...

2 Commits

Author SHA1 Message Date
Devin AI
56afe4fb6d Address PR feedback and fix lint issues
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-05-01 01:33:43 +00:00
Devin AI
c9ed55814e Add support for Llama API with LiteLLM
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-05-01 01:29:34 +00:00
3 changed files with 50 additions and 1 deletion

View File

@@ -103,6 +103,19 @@ ENV_VARS = {
"key_name": "SAMBANOVA_API_KEY",
}
],
"meta-llama": [
{
"prompt": "Enter your LLAMA API key (press Enter to skip)",
"key_name": "LLAMA_API_KEY",
"description": "API key for Meta's Llama API authentication",
},
{
"prompt": "Enter your LLAMA API base URL (press Enter to skip)",
"key_name": "LLAMA_API_BASE",
"default": "https://api.llama.com/compat/v1",
"description": "Base URL for Meta's Llama API with OpenAI compatibility",
},
],
}
@@ -119,6 +132,7 @@ PROVIDERS = [
"azure",
"cerebras",
"sambanova",
"meta-llama",
]
MODELS = {
@@ -314,6 +328,12 @@ MODELS = {
"sambanova/Meta-Llama-3.2-3B-Instruct",
"sambanova/Meta-Llama-3.2-1B-Instruct",
],
"meta-llama": [
"meta-llama/llama-4-scout-17b",
"meta-llama/llama-4-maverick-17b",
"meta-llama/llama-3.3-70b-instruct",
"meta-llama/llama-3.3-8b-instruct",
],
}
DEFAULT_LLM_MODEL = "gpt-4o-mini"
@@ -321,4 +341,4 @@ DEFAULT_LLM_MODEL = "gpt-4o-mini"
JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
LITELLM_PARAMS = ["api_key", "api_base", "api_version"]
LITELLM_PARAMS = ["api_key", "api_base", "api_version", "llama_api_key", "llama_api_base"]

View File

@@ -120,6 +120,9 @@ LLM_CONTEXT_WINDOW_SIZES = {
"mixtral-8x7b-32768": 32768,
"llama-3.3-70b-versatile": 128000,
"llama-3.3-70b-instruct": 128000,
"llama-4-scout-17b": 128000, # Based on official Meta documentation
"llama-4-maverick-17b": 128000, # Based on official Meta documentation
"llama-3.3-8b-instruct": 128000, # Based on official Meta documentation
# sambanova
"Meta-Llama-3.3-70B-Instruct": 131072,
"QwQ-32B-Preview": 8192,

View File

@@ -618,3 +618,29 @@ def test_handle_streaming_tool_calls_no_tools(mock_emit):
expected_completed_llm_call=1,
expected_final_chunk_result=response,
)
def test_llama_api_support():
    """Verify that Meta's Llama API is wired into the CLI constants.

    Covers provider registration, model listing, environment-variable
    prompts, LiteLLM parameter passthrough, and that every listed model
    resolves to a known context-window size.
    """
    from crewai.cli.constants import ENV_VARS, LITELLM_PARAMS, MODELS, PROVIDERS
    from crewai.llm import LLM_CONTEXT_WINDOW_SIZES

    # Provider and model registration.
    assert "meta-llama" in PROVIDERS
    assert "meta-llama" in MODELS
    assert len(MODELS["meta-llama"]) > 0

    # Environment-variable prompts for the API key and base URL.
    assert "meta-llama" in ENV_VARS
    for required_key in ("LLAMA_API_KEY", "LLAMA_API_BASE"):
        assert any(
            entry["key_name"] == required_key for entry in ENV_VARS["meta-llama"]
        )

    # LiteLLM must forward the Llama-specific parameters.
    for param in ("llama_api_key", "llama_api_base"):
        assert param in LITELLM_PARAMS

    # Each listed model needs a context-window entry; matching is fuzzy
    # (substring either way) because keys omit the provider prefix.
    window_keys = LLM_CONTEXT_WINDOW_SIZES.keys()
    for full_name in MODELS["meta-llama"]:
        short_name = full_name.split("/")[-1]
        matched = any(
            short_name in key or key in short_name for key in window_keys
        )
        assert matched, f"Context window size for {full_name} not found"