diff --git a/src/crewai/cli/constants.py b/src/crewai/cli/constants.py
index fec0b6384..62d161c71 100644
--- a/src/crewai/cli/constants.py
+++ b/src/crewai/cli/constants.py
@@ -123,6 +123,7 @@ MODELS = {
         "claude-3-haiku-20240307",
     ],
     "gemini": [
+        "gemini/gemini-2.5-pro-exp-03-25",
         "gemini/gemini-1.5-flash",
         "gemini/gemini-1.5-pro",
         "gemini/gemini-gemma-2-9b-it",
diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index 741544662..661ed9426 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -81,6 +81,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "o1-mini": 128000,
     "o3-mini": 200000,  # Based on official o3-mini specifications
     # gemini
+    "gemini-2.5-pro-exp-03-25": 2097152,
     "gemini-2.0-flash": 1048576,
     "gemini-1.5-pro": 2097152,
     "gemini-1.5-flash": 1048576,
diff --git a/tests/test_gemini_2_5_support.py b/tests/test_gemini_2_5_support.py
new file mode 100644
index 000000000..7d4902c8b
--- /dev/null
+++ b/tests/test_gemini_2_5_support.py
@@ -0,0 +1,14 @@
+from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM
+
+
+def test_get_custom_llm_provider_gemini_2_5():
+    """Test that the Gemini 2.5 model is correctly identified as a Gemini provider."""
+    llm = LLM(model="gemini/gemini-2.5-pro-exp-03-25")
+    assert llm._get_custom_llm_provider() == "gemini"
+
+
+def test_gemini_2_5_context_window_size():
+    """Test that the Gemini 2.5 model has the correct context window size."""
+    llm = LLM(model="gemini-2.5-pro-exp-03-25")
+    expected_size = int(2097152 * CONTEXT_WINDOW_USAGE_RATIO)
+    assert llm.get_context_window_size() == expected_size