fix: respect vertex ai location settings

- Add location parameter to LLM class
- Add test for vertex ai location setting
- Update documentation

Fixes #2141

Co-Authored-By: Joe Moura <joao@crewai.com>
This commit is contained in:
Devin AI
2025-02-15 18:53:31 +00:00
parent 1b488b6da7
commit 5c171166d4
2 changed files with 31 additions and 0 deletions

View File

@@ -3,6 +3,7 @@ from time import sleep
from unittest.mock import MagicMock, patch
import pytest
import litellm
from pydantic import BaseModel
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
@@ -12,6 +13,21 @@ from crewai.utilities.token_counter_callback import TokenCalcHandler
# TODO: This test fails without print statement, which makes me think that something is happening asynchronously that we need to eventually fix and dive deeper into at a later date
@pytest.mark.vcr(filter_headers=["authorization"])
def test_vertex_ai_location():
    """Test that the Vertex AI ``location`` setting is respected.

    Constructing an ``LLM`` with a ``location`` keyword should propagate
    the value to the module-level ``litellm.vertex_location`` setting.
    """
    location = "europe-west4"
    try:
        # The constructor's side effect is what we are testing; the
        # instance itself is not needed afterwards.
        LLM(
            model="vertex_ai/gemini-2.0-flash",
            location=location,
        )
        # Verify the location was forwarded to litellm's global setting.
        assert litellm.vertex_location == location
    finally:
        # Always reset the global so other tests are unaffected, even
        # when the assertion above fails (the original reset was skipped
        # on failure, leaking state into the rest of the test session).
        litellm.vertex_location = None
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_callback_replacement():
llm1 = LLM(model="gpt-4o-mini")