Compare commits

...

7 Commits

Author SHA1 Message Date
Lucas Gomide
9829692250 Merge branch 'main' into devin/1743560186-fix-issue-2511 2025-04-11 09:41:44 -03:00
Lucas Gomide
d2caf11191 Support Python 3.10+ (on CI) and remove redundant Self imports (#2553)
* ci(workflows): add Python version matrix (3.10-3.12) for tests

* refactor: remove explicit Self import from typing

typing.Self is only available from Python 3.11 (PEP 673), so the annotations are rewritten as string forward references for Python 3.10 compatibility

* chore: rename external_memory file test

---------

Co-authored-by: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com>
2025-04-10 14:37:24 -04:00
Devin AI
c1bf544b1b Fix model provider detection and context window size calculation for Gemini 2.5
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-02 02:30:38 +00:00
Devin AI
b2314a3d30 Fix import sorting in test file
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-02 02:25:55 +00:00
Devin AI
baeb0dab48 Address PR comments: Add constants for context window sizes and expand test coverage
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-02 02:24:30 +00:00
Devin AI
3b378dc5a6 Fix lint errors in test file
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-02 02:19:57 +00:00
Devin AI
5d4395b0b0 Fixes #2511
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-02 02:17:43 +00:00
7 changed files with 87 additions and 21 deletions

View File

@@ -12,6 +12,9 @@ jobs:
   tests:
     runs-on: ubuntu-latest
     timeout-minutes: 15
+    strategy:
+      matrix:
+        python-version: ['3.10', '3.11', '3.12']
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -21,9 +24,8 @@ jobs:
         with:
           enable-cache: true
-      - name: Set up Python
-        run: uv python install 3.12.8
+      - name: Set up Python ${{ matrix.python-version }}
+        run: uv python install ${{ matrix.python-version }}
       - name: Install the project
         run: uv sync --dev --all-extras
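Note: the new 3.10 entry in this matrix is what surfaces the typing bug addressed in the memory hunks below. typing.Self was only added in Python 3.11 (PEP 673), so importing it fails outright on 3.10. A quick standalone check (a hypothetical snippet, not part of this diff):

    import sys

    try:
        from typing import Self  # only available since Python 3.11 (PEP 673)
    except ImportError:
        # This branch is taken on the new 3.10 matrix entry,
        # which is why the hunks below drop the import.
        assert sys.version_info < (3, 11)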

View File

@@ -123,6 +123,7 @@ MODELS = {
         "claude-3-haiku-20240307",
     ],
     "gemini": [
+        "gemini/gemini-2.5-pro-exp-03-25",  # Experimental release - March 2025
         "gemini/gemini-1.5-flash",
         "gemini/gemini-1.5-pro",
         "gemini/gemini-gemma-2-9b-it",

View File

@@ -71,20 +71,26 @@ class FilteredStream:
         return self._original_stream.flush()


+PRO_CONTEXT_SIZE = 2097152  # 2M tokens
+FLASH_CONTEXT_SIZE = 1048576  # 1M tokens
+GPT4_TURBO_CONTEXT_SIZE = 128000
+CLAUDE_LARGE_CONTEXT_SIZE = 200000
+
 LLM_CONTEXT_WINDOW_SIZES = {
     # openai
     "gpt-4": 8192,
-    "gpt-4o": 128000,
-    "gpt-4o-mini": 128000,
-    "gpt-4-turbo": 128000,
-    "o1-preview": 128000,
-    "o1-mini": 128000,
-    "o3-mini": 200000,  # Based on official o3-mini specifications
+    "gpt-4o": GPT4_TURBO_CONTEXT_SIZE,
+    "gpt-4o-mini": GPT4_TURBO_CONTEXT_SIZE,
+    "gpt-4-turbo": GPT4_TURBO_CONTEXT_SIZE,
+    "o1-preview": GPT4_TURBO_CONTEXT_SIZE,
+    "o1-mini": GPT4_TURBO_CONTEXT_SIZE,
+    "o3-mini": CLAUDE_LARGE_CONTEXT_SIZE,  # Based on official o3-mini specifications
     # gemini
-    "gemini-2.0-flash": 1048576,
-    "gemini-1.5-pro": 2097152,
-    "gemini-1.5-flash": 1048576,
-    "gemini-1.5-flash-8b": 1048576,
+    "gemini-2.5-pro-exp-03-25": PRO_CONTEXT_SIZE,
+    "gemini-2.0-flash": FLASH_CONTEXT_SIZE,
+    "gemini-1.5-pro": PRO_CONTEXT_SIZE,
+    "gemini-1.5-flash": FLASH_CONTEXT_SIZE,
+    "gemini-1.5-flash-8b": FLASH_CONTEXT_SIZE,
     # deepseek
     "deepseek-chat": 128000,
     # groq
@@ -884,10 +890,16 @@ class LLM(BaseLLM):
         Derives the custom_llm_provider from the model string.
         - For example, if the model is "openrouter/deepseek/deepseek-chat", returns "openrouter".
         - If the model is "gemini/gemini-1.5-pro", returns "gemini".
-        - If there is no '/', defaults to "openai".
+        - If the model starts with "gemini-", returns "gemini" only for valid Gemini models.
+        - If there is no '/' or recognized prefix, returns None.
         """
         if "/" in self.model:
             return self.model.split("/")[0]
+        if self.model.startswith("gemini-"):
+            valid_gemini_models = ["gemini-2.5-pro-exp-03-25", "gemini-2.0-flash",
+                                   "gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"]
+            if self.model in valid_gemini_models:
+                return "gemini"
         return None

     def _validate_call_params(self) -> None:
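Taken together with the docstring, the branch above gives this behavior (a sketch of expected return values, derived from the hunk; not an exhaustive spec):

    from crewai.llm import LLM

    assert LLM(model="openrouter/deepseek/deepseek-chat")._get_custom_llm_provider() == "openrouter"  # prefix before '/'
    assert LLM(model="gemini-2.5-pro-exp-03-25")._get_custom_llm_provider() == "gemini"  # bare name, in the allow-list
    assert LLM(model="gemini-2.5-wrong")._get_custom_llm_provider() is None              # bare name, not in the allow-list
    assert LLM(model="gpt-4o")._get_custom_llm_provider() is None                        # no '/' and no "gemini-" prefix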
@@ -951,9 +963,19 @@ class LLM(BaseLLM):
         self.context_window_size = int(
             DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
         )
-        for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
-            if self.model.startswith(key):
-                self.context_window_size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
+        model_name = self.model
+        if "/" in model_name:
+            model_name = model_name.split("/", 1)[1]
+        if model_name in LLM_CONTEXT_WINDOW_SIZES:
+            self.context_window_size = int(
+                LLM_CONTEXT_WINDOW_SIZES[model_name] * CONTEXT_WINDOW_USAGE_RATIO
+            )
+        else:
+            for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
+                if model_name.startswith(key):
+                    self.context_window_size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
+                    break
         return self.context_window_size

     def set_callbacks(self, callbacks: List[Any]):
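The old loop never stripped the provider prefix, so "gemini/gemini-1.5-pro" matched no key and silently fell back to the default window. The new code strips the prefix, prefers an exact match, and only then falls back to a prefix scan. A self-contained sketch of the same resolution order (resolve is a hypothetical helper; constants are inlined, and CONTEXT_WINDOW_USAGE_RATIO's real value is defined elsewhere in llm.py — 0.75 here is an assumption for the arithmetic only):

    LLM_CONTEXT_WINDOW_SIZES = {"gemini-2.5-pro-exp-03-25": 2097152, "gpt-4": 8192}
    CONTEXT_WINDOW_USAGE_RATIO = 0.75   # assumed value, for illustration only
    DEFAULT_CONTEXT_WINDOW_SIZE = 8192  # assumed default, for illustration only

    def resolve(model: str) -> int:
        # Strip a provider prefix like "gemini/" before looking up the model.
        name = model.split("/", 1)[1] if "/" in model else model
        if name in LLM_CONTEXT_WINDOW_SIZES:  # 1) exact match wins
            return int(LLM_CONTEXT_WINDOW_SIZES[name] * CONTEXT_WINDOW_USAGE_RATIO)
        for key, value in LLM_CONTEXT_WINDOW_SIZES.items():  # 2) first prefix hit
            if name.startswith(key):
                return int(value * CONTEXT_WINDOW_USAGE_RATIO)
        return int(DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO)  # 3) default

    assert resolve("gemini/gemini-2.5-pro-exp-03-25") == 1572864  # 2097152 * 0.75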

View File

@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any, Dict, Optional, Self
+from typing import TYPE_CHECKING, Any, Dict, Optional

 from crewai.memory.external.external_memory_item import ExternalMemoryItem
 from crewai.memory.memory import Memory
@@ -52,7 +52,7 @@ class ExternalMemory(Memory):
     def reset(self) -> None:
         self.storage.reset()

-    def set_crew(self, crew: Any) -> Self:
+    def set_crew(self, crew: Any) -> "ExternalMemory":
         super().set_crew(crew)

         if not self.storage:
View File

@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional, Self
+from typing import Any, Dict, List, Optional

 from pydantic import BaseModel

@@ -38,6 +38,6 @@ class Memory(BaseModel):
             query=query, limit=limit, score_threshold=score_threshold
         )

-    def set_crew(self, crew: Any) -> Self:
+    def set_crew(self, crew: Any) -> "Memory":
         self.crew = crew
         return self
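For reference, the pattern behind these two hunks: typing.Self requires Python 3.11+, while a string (forward-reference) annotation works on 3.10 and is evaluated lazily by type checkers. A minimal sketch with a hypothetical class, not taken from the diff:

    from typing import Any

    class Example:
        # Python 3.11+ only (PEP 673):
        #     def set_crew(self, crew: Any) -> Self: ...
        # Portable to 3.10: annotate with the class name as a string,
        # which type checkers resolve as a forward reference.
        def set_crew(self, crew: Any) -> "Example":
            self.crew = crew
            return self

One trade-off worth noting: unlike Self, the string form pins the return type to the declaring class, which is why ExternalMemory.set_crew keeps its own annotation above rather than inheriting Memory's.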

View File

@@ -0,0 +1,41 @@
+import pytest
+
+from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM, PRO_CONTEXT_SIZE
+
+
+def test_get_custom_llm_provider_gemini_2_5():
+    """Test that the Gemini 2.5 model is correctly identified as a Gemini provider."""
+    llm = LLM(model="gemini/gemini-2.5-pro-exp-03-25")
+    assert llm._get_custom_llm_provider() == "gemini"
+
+
+def test_gemini_2_5_context_window_size():
+    """Test that the Gemini 2.5 model has the correct context window size."""
+    llm = LLM(model="gemini-2.5-pro-exp-03-25")
+    expected_size = int(PRO_CONTEXT_SIZE * CONTEXT_WINDOW_USAGE_RATIO)
+    assert llm.get_context_window_size() == expected_size
+
+
+def test_gemini_2_5_invalid_model_name():
+    """Test handling of invalid model name variations."""
+    llm = LLM(model="gemini-2.5-wrong")
+    assert llm._get_custom_llm_provider() != "gemini"
+
+
+def test_gemini_2_5_model_parameters():
+    """Test model initialization with various parameters."""
+    llm = LLM(
+        model="gemini/gemini-2.5-pro-exp-03-25",
+        temperature=0.7,
+        max_tokens=1000
+    )
+    assert llm.model == "gemini/gemini-2.5-pro-exp-03-25"
+    assert llm.temperature == 0.7
+    assert llm.max_tokens == 1000
+
+
+def test_gemini_2_5_with_and_without_prefix():
+    """Test that the model works with and without the 'gemini/' prefix."""
+    llm_with_prefix = LLM(model="gemini/gemini-2.5-pro-exp-03-25")
+    llm_without_prefix = LLM(model="gemini-2.5-pro-exp-03-25")
+    assert llm_with_prefix._get_custom_llm_provider() == "gemini"
+    assert llm_without_prefix._get_custom_llm_provider() == "gemini"
+    assert llm_with_prefix.get_context_window_size() == llm_without_prefix.get_context_window_size()