Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-11 00:58:30 +00:00
Merge branch 'main' into joaomdmoura/amp-to-aop
@@ -145,6 +145,7 @@ MODELS = {
         "claude-3-haiku-20240307",
     ],
     "gemini": [
+        "gemini/gemini-3-pro-preview",
        "gemini/gemini-1.5-flash",
         "gemini/gemini-1.5-pro",
         "gemini/gemini-2.0-flash-lite-001",
@@ -179,6 +179,7 @@ LLM_CONTEXT_WINDOW_SIZES: Final[dict[str, int]] = {
     "o3-mini": 200000,
     "o4-mini": 200000,
     # gemini
+    "gemini-3-pro-preview": 1048576,
     "gemini-2.0-flash": 1048576,
     "gemini-2.0-flash-thinking-exp-01-21": 32768,
     "gemini-2.0-flash-lite-001": 1048576,
@@ -235,6 +235,7 @@ ANTHROPIC_MODELS: list[AnthropicModels] = [
 ]
 
 GeminiModels: TypeAlias = Literal[
+    "gemini-3-pro-preview",
     "gemini-2.5-pro",
     "gemini-2.5-pro-preview-03-25",
     "gemini-2.5-pro-preview-05-06",
@@ -287,6 +288,7 @@ GeminiModels: TypeAlias = Literal[
     "learnlm-2.0-flash-experimental",
 ]
 GEMINI_MODELS: list[GeminiModels] = [
+    "gemini-3-pro-preview",
     "gemini-2.5-pro",
     "gemini-2.5-pro-preview-03-25",
     "gemini-2.5-pro-preview-05-06",
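Taken together, the four hunks above register the new "gemini-3-pro-preview" ID in the model catalog, the context-window table, and the Gemini type alias and model list. A minimal usage sketch (not part of the diff; the LLM(model=...) entry point and the provider-prefixed ID mirror the tests further down, and no other arguments are implied):

# Sketch only: instantiate the newly registered model through crewAI's LLM wrapper.
from crewai import LLM

llm = LLM(model="gemini/gemini-3-pro-preview")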
@@ -1,5 +1,6 @@
 import logging
 import os
+import re
 from typing import Any, cast
 
 from pydantic import BaseModel
@@ -100,9 +101,8 @@ class GeminiCompletion(BaseLLM):
         self.stop_sequences = stop_sequences or []
 
         # Model-specific settings
-        self.is_gemini_2 = "gemini-2" in model.lower()
-        self.is_gemini_1_5 = "gemini-1.5" in model.lower()
-        self.supports_tools = self.is_gemini_1_5 or self.is_gemini_2
+        version_match = re.search(r"gemini-(\d+(?:\.\d+)?)", model.lower())
+        self.supports_tools = bool(version_match and float(version_match.group(1)) >= 1.5)
 
     @property
     def stop(self) -> list[str]:
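The rewritten initializer derives tool support from the numeric version in the model name instead of two hard-coded substring flags, so 2.5 and 3.x IDs qualify without further edits (which is also why the is_gemini_2/is_gemini_1_5 assertions are deleted from the test further down). A standalone illustration of how the new expression classifies a few IDs; the helper function exists only for demonstration, the real logic lives in __init__ above:

# Demonstration only: the same version check as above, extracted into a plain function.
import re

def supports_tools(model: str) -> bool:
    match = re.search(r"gemini-(\d+(?:\.\d+)?)", model.lower())
    return bool(match and float(match.group(1)) >= 1.5)

print(supports_tools("gemini/gemini-1.5-pro"))      # True  (1.5 >= 1.5)
print(supports_tools("gemini-3-pro-preview"))       # True  (3 >= 1.5)
print(supports_tools("gemini-2.0-flash-lite-001"))  # True  (2.0 >= 1.5)
print(supports_tools("gemini-1.0-pro"))             # False (1.0 < 1.5)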
@@ -559,6 +559,7 @@ class GeminiCompletion(BaseLLM):
         )
 
         context_windows = {
+            "gemini-3-pro-preview": 1048576, # 1M tokens
             "gemini-2.0-flash": 1048576, # 1M tokens
             "gemini-2.0-flash-thinking": 32768,
             "gemini-2.0-flash-lite": 1048576,
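The keys in this local table are shorter than the full published IDs (e.g. "gemini-2.0-flash-thinking" without the "-exp-01-21" suffix), which suggests the surrounding method matches by prefix rather than by exact name. A hedged sketch of that kind of lookup; the function name, fallback value, and matching strategy are assumptions, not taken from this diff:

# Hedged sketch: resolve a context window by the longest matching key prefix.
# The dict excerpt mirrors the entries shown above; the helper itself is illustrative only.
context_windows = {
    "gemini-3-pro-preview": 1048576,     # 1M tokens
    "gemini-2.0-flash": 1048576,         # 1M tokens
    "gemini-2.0-flash-thinking": 32768,
    "gemini-2.0-flash-lite": 1048576,
}

def resolve_context_window(model: str, default: int = 8192) -> int:
    # Prefer the longest prefix so "gemini-2.0-flash-thinking-exp-01-21"
    # resolves to 32768 rather than the generic "gemini-2.0-flash" entry.
    matches = [key for key in context_windows if model.startswith(key)]
    return context_windows[max(matches, key=len)] if matches else default

print(resolve_context_window("gemini-2.0-flash-thinking-exp-01-21"))  # 32768
print(resolve_context_window("gemini-3-pro-preview"))                 # 1048576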
@@ -0,0 +1,69 @@
+interactions:
+- request:
+    body: '{"contents":[{"role":"user","parts":[{"text":"What is the capital of France?"}]}],"generationConfig":{"stop_sequences":[]}}'
+    headers:
+      accept:
+      - '*/*'
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '123'
+      content-type:
+      - application/json
+      host:
+      - generativelanguage.googleapis.com
+      user-agent:
+      - litellm/1.78.5
+    method: POST
+    uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-3-pro-preview:generateContent
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAC/21UW4+iSBh9719heGxmBgFvbDIPgKAgNwUV3OxDCSWU3KFApdP/fWl77XF2l6RI
+        5ftOnVN1kt+8vQwGhA+yAAUAw5r4Y/BnXxkM3u7/j16eYZjhvvEo9cUCVPgX9vN7e9r3EAyvH4cI
+        J4IDHxQIg2SQnwZyBTIfDlA9eH21QIXq19cfxLd/HY3yJoywjcIM4KaCHzRSvZbEWpL4YIlRytG8
+        a3eoGiukHPHm3jH2FNvMTC1qLlgS05RL42PVyPMdz1uFHpQuytZSBqcHf7PexMHK3mjJQjWKIbM+
+        MxFL6cvWMMfQFsOJ3UQk5j1hWmoxK1DrLqncyrpcQ+UY0uZog2oqkTmXiQ2f27ZBpS58MXBTxRbX
+        qdfsl25Vn5tswrUHeVhVxenW7kaG0cKdt2hjjxPUBYY26BAUvbqqw30AoG0eTMmzdImnIrI51+VY
+        xeqUl/HKs8ZgfBPF0bbtMDjMzxZSkv3KNuJgwTlYMkw9YEyKMcfkRvUmkiPpBqL486niJEuQKtE7
+        XibhpJy1AltrXSrjq+iEucKfK5z43Ci6bTu+VIVuRNecmwRN2gnbqQHH6lQ06eNM5ttpwEjZVOI3
+        umesM9qbcxMySprtbDYXaboQdioPMpuEy3U4VZrM6njN0rAk8Fh3/ON+E58FJPDtxD8upIWTbI/D
+        MrqM7RWj7VWo6kMFUgaj5Dpzsg8bE6GoIc+rJEcnau8qGNnZygGNcRO61nD5sXgyWbUQ+Z4XQhrX
+        3C6UyS2OTHAp2cUJVp0eSZqtyTuTy48XjmW0xLJVYRqYYmSZhatQ45ROKPZiXTZTxiq2ceDPIhii
+        7tBurqtSL7ylp5NRw5FUzJXsLkiRJs1BIi05Oxit51ToBF2oTGOvYTXjfJptR62SVdTB7W5aaJzq
+        nb9adAVFIii3gZE5Qz87C+ViVKa3eJ2f4pyiSzasywoHJA2klNL01IIYX6o55V8n3BUc8vKagLIp
+        d/pRZoatSfor/yx4bAYp/udP4mlc3r/2f/2aIqLKk/vUpHkAkwf8/QEgTihDdbSBoM6zD5jtmNbX
+        EBIoC+C1Lw9fHgJ3aqKpQQh1iEGfFOArD4iiytMCO3kMMzFv7kkx++R6ypX/beO8D4XfOvSI/vYf
+        1nrea6LkOW+eoqh/IkgQvt2zRnKdpzDpBZ5VHza8PLn1yJrfL0gz45d//Pq0cAerGn16FcK0d+87
+        +72/Yb9gi+DlrklUsC7yrIZK8IHbeV4/2Sy/LL9v50a3aquVZ2uPeHl/+RvdmjG6dAUAAA==
+    headers:
+      Alt-Svc:
+      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json; charset=UTF-8
+      Date:
+      - Wed, 19 Nov 2025 08:56:53 GMT
+      Server:
+      - scaffolding on HTTPServer2
+      Server-Timing:
+      - gfet4t7; dur=2508
+      Transfer-Encoding:
+      - chunked
+      Vary:
+      - Origin
+      - X-Origin
+      - Referer
+      X-Content-Type-Options:
+      - nosniff
+      X-Frame-Options:
+      - SAMEORIGIN
+      X-XSS-Protection:
+      - '0'
+    status:
+      code: 200
+      message: OK
+version: 1
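The new file above is a vcrpy-style cassette: it records one generateContent request to gemini-3-pro-preview together with its gzipped response so the suite can replay the exchange offline, without a live API key. A hedged sketch of how such a cassette is typically consumed with pytest-recording; the test name, prompt handling, and assertion are placeholders, not the repository's actual test:

# Hedged sketch of cassette-backed testing with pytest-recording / vcrpy.
# The vcr marker replays the recorded HTTP exchange instead of hitting the live Gemini API.
import pytest

from crewai import LLM

@pytest.mark.vcr()
def test_gemini_3_pro_preview_replays_recorded_call() -> None:
    llm = LLM(model="gemini/gemini-3-pro-preview")
    answer = llm.call("What is the capital of France?")  # served from the cassette
    assert "Paris" in answer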
@@ -455,13 +455,11 @@ def test_gemini_model_capabilities():
     llm_2_0 = LLM(model="google/gemini-2.0-flash-001")
     from crewai.llms.providers.gemini.completion import GeminiCompletion
     assert isinstance(llm_2_0, GeminiCompletion)
-    assert llm_2_0.is_gemini_2 == True
     assert llm_2_0.supports_tools == True
 
     # Test Gemini 1.5 model
     llm_1_5 = LLM(model="google/gemini-1.5-pro")
     assert isinstance(llm_1_5, GeminiCompletion)
-    assert llm_1_5.is_gemini_1_5 == True
     assert llm_1_5.supports_tools == True
 
 
@@ -259,6 +259,7 @@ def test_validate_call_params_no_response_format():
 @pytest.mark.parametrize(
     "model",
     [
+        "gemini/gemini-3-pro-preview",
         "gemini/gemini-2.0-flash-thinking-exp-01-21",
         "gemini/gemini-2.0-flash-001",
         "gemini/gemini-2.0-flash-lite-001",