diff --git a/src/crewai/cli/constants.py b/src/crewai/cli/constants.py
index 94c6a28ae..268b1b1ad 100644
--- a/src/crewai/cli/constants.py
+++ b/src/crewai/cli/constants.py
@@ -107,11 +107,13 @@ ENV_VARS = {
         {
             "prompt": "Enter your LLAMA API key (press Enter to skip)",
             "key_name": "LLAMA_API_KEY",
+            "description": "API key for Meta's Llama API authentication",
         },
         {
             "prompt": "Enter your LLAMA API base URL (press Enter to skip)",
             "key_name": "LLAMA_API_BASE",
             "default": "https://api.llama.com/compat/v1",
+            "description": "Base URL for Meta's Llama API with OpenAI compatibility",
         },
     ],
 }
@@ -327,8 +329,8 @@ MODELS = {
         "sambanova/Meta-Llama-3.2-1B-Instruct",
     ],
     "meta-llama": [
-        "meta-llama/llama-4-scout-17b-16e-instruct-fp8",
-        "meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
+        "meta-llama/llama-4-scout-17b",
+        "meta-llama/llama-4-maverick-17b",
         "meta-llama/llama-3.3-70b-instruct",
         "meta-llama/llama-3.3-8b-instruct",
     ],
diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index db4ebb5a8..ab2f3e87a 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -120,9 +120,9 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "mixtral-8x7b-32768": 32768,
     "llama-3.3-70b-versatile": 128000,
     "llama-3.3-70b-instruct": 128000,
-    "llama-4-scout-17b-16e-instruct-fp8": 128000,
-    "llama-4-maverick-17b-128e-instruct-fp8": 128000,
-    "llama-3.3-8b-instruct": 128000,
+    "llama-4-scout-17b": 128000,  # Based on official Meta documentation
+    "llama-4-maverick-17b": 128000,  # Based on official Meta documentation
+    "llama-3.3-8b-instruct": 128000,  # Based on official Meta documentation
     # sambanova
     "Meta-Llama-3.3-70B-Instruct": 131072,
     "QwQ-32B-Preview": 8192,
diff --git a/tests/llm_test.py b/tests/llm_test.py
index 61e5900bc..998bdc670 100644
--- a/tests/llm_test.py
+++ b/tests/llm_test.py
@@ -621,8 +621,11 @@ def test_handle_streaming_tool_calls_no_tools(mock_emit):
 
 
 def test_llama_api_support():
-    """Test that Llama API models are correctly configured."""
-    from crewai.cli.constants import MODELS, PROVIDERS, ENV_VARS, LITELLM_PARAMS
+    """
+    Test Llama API configuration and integration.
+    - Verifies provider registration, model availability, environment variables, and context window sizes.
+    """
+    from crewai.cli.constants import ENV_VARS, LITELLM_PARAMS, MODELS, PROVIDERS
     from crewai.llm import LLM_CONTEXT_WINDOW_SIZES
 
     assert "meta-llama" in PROVIDERS