Fix Azure OpenAI authentication for models without azure/ prefix (fixes #2358)

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date:   2025-03-13 08:41:04 +00:00
parent 41a670166a
commit 1145900b39
3 changed files with 63 additions and 2 deletions
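For context on what the fix enables: LiteLLM routes a bare model name (no provider prefix) down its default OpenAI path, so Azure credentials alone are not enough to reach an Azure OpenAI deployment. A minimal sketch of the two routing styles, assuming LiteLLM's completion API and using placeholder credentials (not real values, and not crewai's internal call):

# Sketch only: placeholder endpoint/key values, assuming litellm's completion API.
import litellm

messages = [{"role": "user", "content": "hello"}]

# Style 1: explicit provider prefix in the model string.
litellm.completion(
    model="azure/gpt-4o-mini-2024-07-18",
    messages=messages,
    api_key="<azure-key>",
    api_base="https://<resource>.openai.azure.com",
    api_version="2024-02-01",
)

# Style 2: bare model name plus an explicit provider override --
# the routing that this commit's provider detection is meant to enable.
litellm.completion(
    model="gpt-4o-mini-2024-07-18",
    messages=messages,
    custom_llm_provider="azure",
    api_key="<azure-key>",
    api_base="https://<resource>.openai.azure.com",
    api_version="2024-02-01",
)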

src/crewai/llm.py

@@ -829,10 +829,18 @@ class LLM:
         Derives the custom_llm_provider from the model string.
         - For example, if the model is "openrouter/deepseek/deepseek-chat", returns "openrouter".
         - If the model is "gemini/gemini-1.5-pro", returns "gemini".
-        - If there is no '/', defaults to "openai".
+        - If the model is "azure/gpt-4", returns "azure".
+        - If Azure-specific parameters are provided (api_key, api_base, api_version), returns "azure".
+        - If there is no '/' and no Azure parameters, returns None.
         """
+        # Check if model explicitly has a provider prefix
         if "/" in self.model:
             return self.model.split("/")[0]
+        # Check if all Azure parameters are present
+        if self.api_key and self.api_base and self.api_version:
+            return "azure"
         return None
 
     def _validate_call_params(self) -> None:

@@ -842,7 +850,9 @@ class LLM:
         The custom_llm_provider is dynamically determined from the model:
         - E.g., "openrouter/deepseek/deepseek-chat" yields "openrouter"
         - "gemini/gemini-1.5-pro" yields "gemini"
-        - If no slash is present, "openai" is assumed.
+        - "azure/gpt-4" yields "azure"
+        - If Azure parameters (api_key, api_base, api_version) are present, "azure" is used
+        - If no slash is present and no Azure parameters, None is returned
         """
         provider = self._get_custom_llm_provider()
         if self.response_format is not None and not supports_response_schema(
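Pulled out of the class, the new detection order reads as a small pure function (the name detect_provider is illustrative only, not part of the crewai API):

from typing import Optional

def detect_provider(model: str,
                    api_key: Optional[str] = None,
                    api_base: Optional[str] = None,
                    api_version: Optional[str] = None) -> Optional[str]:
    # An explicit prefix always wins: "azure/gpt-4" -> "azure".
    if "/" in model:
        return model.split("/")[0]
    # All three Azure parameters together imply Azure routing.
    if api_key and api_base and api_version:
        return "azure"
    # Otherwise leave provider inference to LiteLLM.
    return None

assert detect_provider("azure/gpt-4") == "azure"
assert detect_provider("gpt-4", "key", "base", "version") == "azure"
assert detect_provider("gpt-4") is None                 # no Azure params
assert detect_provider("gpt-4", api_key="key") is None  # partial params fall through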

test_azure_integration.py (new file)

@@ -0,0 +1,37 @@
# test_azure_integration.py
from src.crewai.llm import LLM

# Test with Azure parameters but without azure/ prefix
llm = LLM(
    api_key='test_key',
    api_base='test_base',
    model='gpt-4o-mini-2024-07-18',
    api_version='test_version'
)

# Print the detected provider
provider = llm._get_custom_llm_provider()
print(f"Detected provider: {provider}")
print(f"Is Azure detected correctly: {provider == 'azure'}")

# Prepare parameters that would be passed to LiteLLM
params = llm._prepare_completion_params(messages=[{"role": "user", "content": "test"}])
print(f"Parameters passed to LiteLLM: {params}")

# Test with Azure parameters and with azure/ prefix for comparison
llm_with_prefix = LLM(
    api_key='test_key',
    api_base='test_base',
    model='azure/gpt-4o-mini-2024-07-18',
    api_version='test_version'
)

# Print the detected provider
provider_with_prefix = llm_with_prefix._get_custom_llm_provider()
print(f"\nWith azure/ prefix:")
print(f"Detected provider: {provider_with_prefix}")
print(f"Is Azure detected correctly: {provider_with_prefix == 'azure'}")

# Prepare parameters that would be passed to LiteLLM
params_with_prefix = llm_with_prefix._prepare_completion_params(messages=[{"role": "user", "content": "test"}])
print(f"Parameters passed to LiteLLM: {params_with_prefix}")


@@ -221,6 +221,20 @@ def test_get_custom_llm_provider_openai():
     llm = LLM(model="gpt-4")
     assert llm._get_custom_llm_provider() == None
 
 
+def test_get_custom_llm_provider_azure_with_prefix():
+    llm = LLM(model="azure/gpt-4")
+    assert llm._get_custom_llm_provider() == "azure"
+
+
+def test_get_custom_llm_provider_azure_without_prefix():
+    llm = LLM(
+        model="gpt-4",
+        api_key="test_key",
+        api_base="test_base",
+        api_version="test_version"
+    )
+    assert llm._get_custom_llm_provider() == "azure"
+
+
 def test_validate_call_params_supported():
     class DummyResponse(BaseModel):
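To run just the new provider-detection tests, pytest's keyword filter matches both test names (the test file path below is a guess at the suite layout, not taken from this diff):

# Hypothetical path; adjust to wherever the LLM tests live in the repo.
pytest tests/llm_test.py -k "custom_llm_provider_azure"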