mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-15 02:58:30 +00:00
@@ -123,6 +123,7 @@ MODELS = {
|
||||
"claude-3-haiku-20240307",
|
||||
],
|
||||
"gemini": [
|
||||
"gemini/gemini-2.5-pro-exp-03-25",
|
||||
"gemini/gemini-1.5-flash",
|
||||
"gemini/gemini-1.5-pro",
|
||||
"gemini/gemini-gemma-2-9b-it",
|
||||
|
||||
@@ -81,6 +81,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
|
||||
"o1-mini": 128000,
|
||||
"o3-mini": 200000, # Based on official o3-mini specifications
|
||||
# gemini
|
||||
"gemini-2.5-pro-exp-03-25": 2097152,
|
||||
"gemini-2.0-flash": 1048576,
|
||||
"gemini-1.5-pro": 2097152,
|
||||
"gemini-1.5-flash": 1048576,
|
||||
|
||||
14
tests/test_gemini_2_5_support.py
Normal file
14
tests/test_gemini_2_5_support.py
Normal file
@@ -0,0 +1,14 @@
|
||||
import pytest
|
||||
from crewai.llm import LLM
|
||||
|
||||
def test_get_custom_llm_provider_gemini_2_5():
    """Verify a provider-prefixed Gemini 2.5 model id resolves to the 'gemini' provider."""
    model = LLM(model="gemini/gemini-2.5-pro-exp-03-25")
    provider = model._get_custom_llm_provider()
    assert provider == "gemini"
|
||||
|
||||
def test_gemini_2_5_context_window_size():
    """Test that the Gemini 2.5 model reports the correct usable context window.

    ``LLM.get_context_window_size()`` is expected to return the model's raw
    window scaled down by ``CONTEXT_WINDOW_USAGE_RATIO`` (a safety margin so
    prompts never fill the entire window).
    """
    # Hoisted to the top of the function: the original imported this constant
    # mid-body, after constructing the LLM — imports belong before other work.
    from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO

    llm = LLM(model="gemini-2.5-pro-exp-03-25")
    # 2_097_152 tokens is the full advertised window for gemini-2.5-pro-exp-03-25
    # (must match the LLM_CONTEXT_WINDOW_SIZES entry for this model).
    expected_size = int(2097152 * CONTEXT_WINDOW_USAGE_RATIO)
    assert llm.get_context_window_size() == expected_size
|
||||
Reference in New Issue
Block a user