Compare commits

...

5 Commits

Author SHA1 Message Date
Devin AI
c85ff65b24 Fix langchain-google-genai integration by stripping 'models/' prefix
Fixes #3702

When using langchain-google-genai with CrewAI, the model name would include
a 'models/' prefix (e.g., 'models/gemini/gemini-pro') which is added by
Google's API internally. However, LiteLLM does not recognize this format,
causing a 'LLM Provider NOT provided' error.

This fix strips the 'models/' prefix from model names when extracting them
from langchain model objects, ensuring compatibility with LiteLLM while
maintaining backward compatibility with models that don't have this prefix.

Changes:
- Modified create_llm() in llm_utils.py to strip 'models/' prefix
- Added comprehensive tests covering various scenarios:
  - Model with 'models/' prefix in model attribute
  - Model with 'models/' prefix in model_name attribute
  - Model without prefix (no change)
  - Case-sensitive prefix handling (only lowercase 'models/' is stripped)

Co-Authored-By: João <joao@crewai.com>
2025-10-14 07:48:10 +00:00
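
The normalization this commit describes is a one-line prefix strip; a minimal sketch with an illustrative model string (the real change appears in the llm_utils.py diff below):

model = "models/gemini/gemini-pro"  # as reported by langchain-google-genai
if isinstance(model, str) and model.startswith("models/"):
    model = model[len("models/"):]
# model is now "gemini/gemini-pro", a format LiteLLM recognizes
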
João Moura
bf2e2a42da fix: don't error out if there is no input() available
- Specific to Jupyter notebooks
2025-10-13 22:36:19 -04:00
Lorenze Jay
814c962196 chore: update crewAI version to 0.203.1 in multiple templates (#3699)
- Bumped the `crewai` version in `__init__.py` to 0.203.1.
- Updated the dependency versions in the crew, flow, and tool templates' `pyproject.toml` files to reflect the new `crewai` version.
2025-10-13 11:46:22 -07:00
Heitor Carvalho
2ebb2e845f fix: add a leeway of 10s when decoding jwt (#3698) 2025-10-13 12:42:03 -03:00
Greyson LaLonde
7b550ebfe8 fix: inject tool repository credentials in crewai run command
2025-10-10 15:00:04 -04:00
10 changed files with 93 additions and 22 deletions

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
 _suppress_pydantic_deprecation_warnings()

-__version__ = "0.203.0"
+__version__ = "0.203.1"

 _telemetry_submitted = False

View File

@@ -30,6 +30,7 @@ def validate_jwt_token(
         algorithms=["RS256"],
         audience=audience,
         issuer=issuer,
+        leeway=10.0,
         options={
             "verify_signature": True,
             "verify_exp": True,

View File

@@ -1,10 +1,11 @@
+import os
 import subprocess
 from enum import Enum

 import click
 from packaging import version

-from crewai.cli.utils import read_toml
+from crewai.cli.utils import build_env_with_tool_repository_credentials, read_toml
 from crewai.cli.version import get_crewai_version
@@ -55,8 +56,22 @@ def execute_command(crew_type: CrewType) -> None:
     """
     command = ["uv", "run", "kickoff" if crew_type == CrewType.FLOW else "run_crew"]
+    env = os.environ.copy()
+
     try:
-        subprocess.run(command, capture_output=False, text=True, check=True)  # noqa: S603
+        pyproject_data = read_toml()
+        sources = pyproject_data.get("tool", {}).get("uv", {}).get("sources", {})
+        for source_config in sources.values():
+            if isinstance(source_config, dict):
+                index = source_config.get("index")
+                if index:
+                    index_env = build_env_with_tool_repository_credentials(index)
+                    env.update(index_env)
+    except Exception:  # noqa: S110
+        pass
+
+    try:
+        subprocess.run(command, capture_output=False, text=True, check=True, env=env)  # noqa: S603
     except subprocess.CalledProcessError as e:
         handle_error(e, crew_type)
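
A hedged sketch of the credential lookup this hunk performs, with an illustrative [tool.uv.sources] shape; the environment variable names stand in for whatever build_env_with_tool_repository_credentials actually returns:

import os

# Illustrative pyproject.toml contents; the real data comes from read_toml().
pyproject_data = {
    "tool": {"uv": {"sources": {"my_tool": {"index": "crewai-enterprise"}}}}
}

env = os.environ.copy()
sources = pyproject_data.get("tool", {}).get("uv", {}).get("sources", {})
for source_config in sources.values():
    if isinstance(source_config, dict):
        index = source_config.get("index")
        if index:
            # Hypothetical stand-in for build_env_with_tool_repository_credentials(index),
            # assumed to return a dict of credential env vars for that index.
            env.update({"UV_INDEX_USERNAME": "user", "UV_INDEX_PASSWORD": "token"})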

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]>=0.203.0,<1.0.0"
+    "crewai[tools]>=0.203.1,<1.0.0"
 ]

 [project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]>=0.203.0,<1.0.0",
+    "crewai[tools]>=0.203.1,<1.0.0",
 ]

 [project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
 readme = "README.md"
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]>=0.203.0"
+    "crewai[tools]>=0.203.1"
 ]

 [tool.crewai]

View File

@@ -358,7 +358,8 @@ def prompt_user_for_trace_viewing(timeout_seconds: int = 20) -> bool:
         try:
             response = input().strip().lower()
             result[0] = response in ["y", "yes"]
-        except (EOFError, KeyboardInterrupt):
+        except (EOFError, KeyboardInterrupt, OSError, LookupError):
+            # Handle all input-related errors silently
            result[0] = False

     input_thread = threading.Thread(target=get_input, daemon=True)
@@ -371,6 +372,7 @@
         return result[0]
     except Exception:
+        # Suppress any warnings or errors and assume "no"
         return False
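
The hardened handler in isolation; read_yes_no is a hypothetical name, and the exception list mirrors the hunk above:

def read_yes_no() -> bool:
    """Best-effort stdin prompt that never raises; a sketch of the handler above."""
    try:
        response = input().strip().lower()
        return response in ["y", "yes"]
    except (EOFError, KeyboardInterrupt, OSError, LookupError):
        # Jupyter kernels and other non-interactive environments may close
        # stdin (EOFError), forbid reads (OSError), or lack a console
        # encoding (LookupError); treat all of these as "no".
        return False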

View File

@@ -41,6 +41,10 @@ def create_llm(
             or getattr(llm_value, "deployment_name", None)
             or str(llm_value)
         )
+        if isinstance(model, str) and model.startswith("models/"):
+            model = model[len("models/"):]
+
         temperature: float | None = getattr(llm_value, "temperature", None)
         max_tokens: int | None = getattr(llm_value, "max_tokens", None)
         logprobs: int | None = getattr(llm_value, "logprobs", None)

View File

@@ -1,7 +1,7 @@
+import jwt
 import unittest
 from unittest.mock import MagicMock, patch

-import jwt
 from crewai.cli.authentication.utils import validate_jwt_token
@@ -17,19 +17,22 @@ class TestUtils(unittest.TestCase):
             key="mock_signing_key"
         )

+        jwt_token = "aaaaa.bbbbbb.cccccc"  # noqa: S105
+
         decoded_token = validate_jwt_token(
-            jwt_token="aaaaa.bbbbbb.cccccc",
+            jwt_token=jwt_token,
             jwks_url="https://mock_jwks_url",
             issuer="https://mock_issuer",
             audience="app_id_xxxx",
         )

         mock_jwt.decode.assert_called_with(
-            "aaaaa.bbbbbb.cccccc",
+            jwt_token,
             "mock_signing_key",
             algorithms=["RS256"],
             audience="app_id_xxxx",
             issuer="https://mock_issuer",
+            leeway=10.0,
             options={
                 "verify_signature": True,
                 "verify_exp": True,
@@ -43,9 +46,9 @@ class TestUtils(unittest.TestCase):
     def test_validate_jwt_token_expired(self, mock_jwt, mock_pyjwkclient):
         mock_jwt.decode.side_effect = jwt.ExpiredSignatureError

-        with self.assertRaises(Exception):
+        with self.assertRaises(Exception):  # noqa: B017
             validate_jwt_token(
-                jwt_token="aaaaa.bbbbbb.cccccc",
+                jwt_token="aaaaa.bbbbbb.cccccc",  # noqa: S106
                 jwks_url="https://mock_jwks_url",
                 issuer="https://mock_issuer",
                 audience="app_id_xxxx",
@@ -53,9 +56,9 @@ class TestUtils(unittest.TestCase):
     def test_validate_jwt_token_invalid_audience(self, mock_jwt, mock_pyjwkclient):
         mock_jwt.decode.side_effect = jwt.InvalidAudienceError

-        with self.assertRaises(Exception):
+        with self.assertRaises(Exception):  # noqa: B017
             validate_jwt_token(
-                jwt_token="aaaaa.bbbbbb.cccccc",
+                jwt_token="aaaaa.bbbbbb.cccccc",  # noqa: S106
                 jwks_url="https://mock_jwks_url",
                 issuer="https://mock_issuer",
                 audience="app_id_xxxx",
@@ -63,9 +66,9 @@ class TestUtils(unittest.TestCase):
     def test_validate_jwt_token_invalid_issuer(self, mock_jwt, mock_pyjwkclient):
         mock_jwt.decode.side_effect = jwt.InvalidIssuerError

-        with self.assertRaises(Exception):
+        with self.assertRaises(Exception):  # noqa: B017
             validate_jwt_token(
-                jwt_token="aaaaa.bbbbbb.cccccc",
+                jwt_token="aaaaa.bbbbbb.cccccc",  # noqa: S106
                 jwks_url="https://mock_jwks_url",
                 issuer="https://mock_issuer",
                 audience="app_id_xxxx",
@@ -75,9 +78,9 @@ class TestUtils(unittest.TestCase):
         self, mock_jwt, mock_pyjwkclient
     ):
         mock_jwt.decode.side_effect = jwt.MissingRequiredClaimError

-        with self.assertRaises(Exception):
+        with self.assertRaises(Exception):  # noqa: B017
             validate_jwt_token(
-                jwt_token="aaaaa.bbbbbb.cccccc",
+                jwt_token="aaaaa.bbbbbb.cccccc",  # noqa: S106
                 jwks_url="https://mock_jwks_url",
                 issuer="https://mock_issuer",
                 audience="app_id_xxxx",
@@ -85,9 +88,9 @@ class TestUtils(unittest.TestCase):
     def test_validate_jwt_token_jwks_error(self, mock_jwt, mock_pyjwkclient):
         mock_jwt.decode.side_effect = jwt.exceptions.PyJWKClientError

-        with self.assertRaises(Exception):
+        with self.assertRaises(Exception):  # noqa: B017
             validate_jwt_token(
-                jwt_token="aaaaa.bbbbbb.cccccc",
+                jwt_token="aaaaa.bbbbbb.cccccc",  # noqa: S106
                 jwks_url="https://mock_jwks_url",
                 issuer="https://mock_issuer",
                 audience="app_id_xxxx",
@@ -95,9 +98,9 @@ class TestUtils(unittest.TestCase):
     def test_validate_jwt_token_invalid_token(self, mock_jwt, mock_pyjwkclient):
         mock_jwt.decode.side_effect = jwt.InvalidTokenError

-        with self.assertRaises(Exception):
+        with self.assertRaises(Exception):  # noqa: B017
             validate_jwt_token(
-                jwt_token="aaaaa.bbbbbb.cccccc",
+                jwt_token="aaaaa.bbbbbb.cccccc",  # noqa: S106
                 jwks_url="https://mock_jwks_url",
                 issuer="https://mock_issuer",
                 audience="app_id_xxxx",

View File

@@ -94,3 +94,49 @@ def test_create_llm_with_invalid_type():
     with pytest.raises(BadRequestError, match="LLM Provider NOT provided"):
         llm = create_llm(llm_value=42)
         llm.call(messages=[{"role": "user", "content": "Hello, world!"}])
+
+
+def test_create_llm_strips_models_prefix_from_model_attribute():
+    """Test that 'models/' prefix is stripped from langchain model names."""
+
+    class LangChainLikeModel:
+        model = "models/gemini/gemini-pro"
+        temperature = 0.7
+
+    obj = LangChainLikeModel()
+    llm = create_llm(llm_value=obj)
+    assert isinstance(llm, LLM)
+    assert llm.model == "gemini/gemini-pro"  # 'models/' prefix should be stripped
+    assert llm.temperature == 0.7
+
+
+def test_create_llm_strips_models_prefix_from_model_name_attribute():
+    """Test that 'models/' prefix is stripped from model_name attribute."""
+
+    class LangChainLikeModelWithModelName:
+        model_name = "models/gemini/gemini-2.0-flash"
+
+    obj = LangChainLikeModelWithModelName()
+    llm = create_llm(llm_value=obj)
+    assert isinstance(llm, LLM)
+    assert llm.model == "gemini/gemini-2.0-flash"  # 'models/' prefix should be stripped
+
+
+def test_create_llm_handles_model_without_prefix():
+    """Test that models without 'models/' prefix are handled correctly."""
+
+    class RegularModel:
+        model = "gemini/gemini-pro"
+
+    obj = RegularModel()
+    llm = create_llm(llm_value=obj)
+    assert isinstance(llm, LLM)
+    assert llm.model == "gemini/gemini-pro"  # No change when prefix not present
+
+
+def test_create_llm_strips_models_prefix_case_sensitive():
+    """Test that only lowercase 'models/' prefix is stripped."""
+
+    class UpperCaseModel:
+        model = "Models/gemini/gemini-pro"  # Uppercase M
+
+    obj = UpperCaseModel()
+    llm = create_llm(llm_value=obj)
+    assert isinstance(llm, LLM)
+    assert llm.model == "Models/gemini/gemini-pro"