mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-28 09:38:17 +00:00

Compare commits: llm-event-... ... devin/1743...

6 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 9829692250 | |
| | c1bf544b1b | |
| | b2314a3d30 | |
| | baeb0dab48 | |
| | 3b378dc5a6 | |
| | 5d4395b0b0 | |
```diff
@@ -123,6 +123,7 @@ MODELS = {
         "claude-3-haiku-20240307",
     ],
     "gemini": [
+        "gemini/gemini-2.5-pro-exp-03-25",  # Experimental release - March 2025
         "gemini/gemini-1.5-flash",
         "gemini/gemini-1.5-pro",
         "gemini/gemini-gemma-2-9b-it",
```
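For reference, a minimal usage sketch of selecting the newly listed model through crewAI's `LLM` class. This is not part of the diff; it assumes the `crewai` package is installed, and a real completion call would also need a Gemini API key in the environment.

```python
# Minimal sketch, not from the diff. Assumes the crewai package is installed;
# an actual completion call would also require GEMINI_API_KEY to be set.
from crewai import LLM

# The "gemini/" prefix routes the request to the Gemini provider.
llm = LLM(model="gemini/gemini-2.5-pro-exp-03-25", temperature=0.7)
```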
```diff
@@ -71,20 +71,26 @@ class FilteredStream:
         return self._original_stream.flush()
 
 
+PRO_CONTEXT_SIZE = 2097152  # 2M tokens
+FLASH_CONTEXT_SIZE = 1048576  # 1M tokens
+GPT4_TURBO_CONTEXT_SIZE = 128000
+CLAUDE_LARGE_CONTEXT_SIZE = 200000
+
 LLM_CONTEXT_WINDOW_SIZES = {
     # openai
     "gpt-4": 8192,
-    "gpt-4o": 128000,
-    "gpt-4o-mini": 128000,
-    "gpt-4-turbo": 128000,
-    "o1-preview": 128000,
-    "o1-mini": 128000,
-    "o3-mini": 200000,  # Based on official o3-mini specifications
+    "gpt-4o": GPT4_TURBO_CONTEXT_SIZE,
+    "gpt-4o-mini": GPT4_TURBO_CONTEXT_SIZE,
+    "gpt-4-turbo": GPT4_TURBO_CONTEXT_SIZE,
+    "o1-preview": GPT4_TURBO_CONTEXT_SIZE,
+    "o1-mini": GPT4_TURBO_CONTEXT_SIZE,
+    "o3-mini": CLAUDE_LARGE_CONTEXT_SIZE,  # Based on official o3-mini specifications
     # gemini
-    "gemini-2.0-flash": 1048576,
-    "gemini-1.5-pro": 2097152,
-    "gemini-1.5-flash": 1048576,
-    "gemini-1.5-flash-8b": 1048576,
+    "gemini-2.5-pro-exp-03-25": PRO_CONTEXT_SIZE,
+    "gemini-2.0-flash": FLASH_CONTEXT_SIZE,
+    "gemini-1.5-pro": PRO_CONTEXT_SIZE,
+    "gemini-1.5-flash": FLASH_CONTEXT_SIZE,
+    "gemini-1.5-flash-8b": FLASH_CONTEXT_SIZE,
     # deepseek
     "deepseek-chat": 128000,
     # groq
```
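The named constants replace repeated magic numbers, and the window crewai actually reports is the raw size scaled by `CONTEXT_WINDOW_USAGE_RATIO`. A standalone sketch of that arithmetic follows; the 0.75 ratio is an assumption here, not shown in the diff.

```python
# Standalone sketch of the window math; the sizes are copied from the diff.
# CONTEXT_WINDOW_USAGE_RATIO = 0.75 is an assumed value (headroom for output).
PRO_CONTEXT_SIZE = 2097152    # 2M tokens
FLASH_CONTEXT_SIZE = 1048576  # 1M tokens
CONTEXT_WINDOW_USAGE_RATIO = 0.75

print(int(PRO_CONTEXT_SIZE * CONTEXT_WINDOW_USAGE_RATIO))    # 1572864
print(int(FLASH_CONTEXT_SIZE * CONTEXT_WINDOW_USAGE_RATIO))  # 786432
```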
```diff
@@ -884,10 +890,16 @@ class LLM(BaseLLM):
         Derives the custom_llm_provider from the model string.
         - For example, if the model is "openrouter/deepseek/deepseek-chat", returns "openrouter".
         - If the model is "gemini/gemini-1.5-pro", returns "gemini".
-        - If there is no '/', defaults to "openai".
+        - If the model starts with "gemini-", returns "gemini" only for valid Gemini models.
+        - If there is no '/' or recognized prefix, returns None.
         """
         if "/" in self.model:
             return self.model.split("/")[0]
+        if self.model.startswith("gemini-"):
+            valid_gemini_models = ["gemini-2.5-pro-exp-03-25", "gemini-2.0-flash",
+                                   "gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"]
+            if self.model in valid_gemini_models:
+                return "gemini"
         return None
 
     def _validate_call_params(self) -> None:
```
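The new branch only accepts bare `gemini-` names from an allow-list, so a typo falls through to `None` instead of being mis-routed to the Gemini provider. A standalone sketch of the same rules as a pure function (`derive_provider` is a hypothetical name for illustration, not crewai API):

```python
# Sketch of the derivation rules above as a pure function; the allow-list
# mirrors valid_gemini_models from the diff. Not the crewai implementation.
from typing import Optional

VALID_GEMINI_MODELS = [
    "gemini-2.5-pro-exp-03-25", "gemini-2.0-flash",
    "gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b",
]

def derive_provider(model: str) -> Optional[str]:
    if "/" in model:                  # "openrouter/deepseek/deepseek-chat" -> "openrouter"
        return model.split("/")[0]
    if model in VALID_GEMINI_MODELS:  # bare names only for known Gemini models
        return "gemini"
    return None                       # no '/' and no recognized prefix

assert derive_provider("openrouter/deepseek/deepseek-chat") == "openrouter"
assert derive_provider("gemini/gemini-1.5-pro") == "gemini"
assert derive_provider("gemini-2.5-pro-exp-03-25") == "gemini"
assert derive_provider("gemini-2.5-wrong") is None
```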
```diff
@@ -951,9 +963,19 @@ class LLM(BaseLLM):
         self.context_window_size = int(
             DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
         )
-        for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
-            if self.model.startswith(key):
-                self.context_window_size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
+        model_name = self.model
+        if "/" in model_name:
+            model_name = model_name.split("/", 1)[1]
+
+        if model_name in LLM_CONTEXT_WINDOW_SIZES:
+            self.context_window_size = int(LLM_CONTEXT_WINDOW_SIZES[model_name] * CONTEXT_WINDOW_USAGE_RATIO)
+        else:
+            for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
+                if model_name.startswith(key):
+                    self.context_window_size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
+                    break
+
         return self.context_window_size
 
     def set_callbacks(self, callbacks: List[Any]):
```
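The rewritten lookup first strips the provider prefix, then prefers an exact key match over the prefix scan, and the added `break` stops the first prefix hit from being overwritten by a later entry. Exact-match-first matters for keys that are prefixes of each other, e.g. "gemini-1.5-flash-8b" starts with "gemini-1.5-flash". A standalone sketch of that ordering (`usable_window` is a hypothetical helper; the 0.75 ratio is assumed, as above):

```python
# Sketch of the lookup order above: strip prefix, exact match, prefix fallback.
# Not the crewai implementation; names and the ratio value are assumptions.
CONTEXT_WINDOW_USAGE_RATIO = 0.75
SIZES = {"gemini-1.5-pro": 2097152, "gemini-1.5-flash": 1048576,
         "gemini-1.5-flash-8b": 1048576}

def usable_window(model: str, default: int = 8192) -> int:
    name = model.split("/", 1)[1] if "/" in model else model
    if name in SIZES:                     # exact match wins
        return int(SIZES[name] * CONTEXT_WINDOW_USAGE_RATIO)
    for key, value in SIZES.items():      # prefix fallback, first hit only
        if name.startswith(key):
            return int(value * CONTEXT_WINDOW_USAGE_RATIO)
    return int(default * CONTEXT_WINDOW_USAGE_RATIO)

assert usable_window("gemini/gemini-1.5-pro") == 1572864
assert usable_window("gemini-1.5-flash-8b") == 786432  # exact, not the -flash prefix
```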
tests/test_gemini_2_5_support.py (new file, 41 lines)

```diff
@@ -0,0 +1,41 @@
+import pytest
+
+from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM, PRO_CONTEXT_SIZE
+
+
+def test_get_custom_llm_provider_gemini_2_5():
+    """Test that the Gemini 2.5 model is correctly identified as a Gemini provider."""
+    llm = LLM(model="gemini/gemini-2.5-pro-exp-03-25")
+    assert llm._get_custom_llm_provider() == "gemini"
+
+def test_gemini_2_5_context_window_size():
+    """Test that the Gemini 2.5 model has the correct context window size."""
+    llm = LLM(model="gemini-2.5-pro-exp-03-25")
+    expected_size = int(PRO_CONTEXT_SIZE * CONTEXT_WINDOW_USAGE_RATIO)
+    assert llm.get_context_window_size() == expected_size
+
+def test_gemini_2_5_invalid_model_name():
+    """Test handling of invalid model name variations."""
+    llm = LLM(model="gemini-2.5-wrong")
+    assert llm._get_custom_llm_provider() != "gemini"
+
+def test_gemini_2_5_model_parameters():
+    """Test model initialization with various parameters."""
+    llm = LLM(
+        model="gemini/gemini-2.5-pro-exp-03-25",
+        temperature=0.7,
+        max_tokens=1000
+    )
+    assert llm.model == "gemini/gemini-2.5-pro-exp-03-25"
+    assert llm.temperature == 0.7
+    assert llm.max_tokens == 1000
+
+def test_gemini_2_5_with_and_without_prefix():
+    """Test that the model works with and without the 'gemini/' prefix."""
+    llm_with_prefix = LLM(model="gemini/gemini-2.5-pro-exp-03-25")
+    llm_without_prefix = LLM(model="gemini-2.5-pro-exp-03-25")
+
+    assert llm_with_prefix._get_custom_llm_provider() == "gemini"
+    assert llm_without_prefix._get_custom_llm_provider() == "gemini"
+
+    assert llm_with_prefix.get_context_window_size() == llm_without_prefix.get_context_window_size()
```
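These tests only exercise local logic (provider derivation, window lookup, constructor plumbing), so no live API call is made. A sketch of running just this module, assuming pytest is installed:

```python
# Equivalent to running `pytest tests/test_gemini_2_5_support.py -v`
# from the repository root.
import pytest

raise SystemExit(pytest.main(["tests/test_gemini_2_5_support.py", "-v"]))
```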