diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index fb8367dfe..01543c14c 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -829,10 +829,18 @@ class LLM:
         Derives the custom_llm_provider from the model string.
         - For example, if the model is "openrouter/deepseek/deepseek-chat", returns "openrouter".
         - If the model is "gemini/gemini-1.5-pro", returns "gemini".
-        - If there is no '/', defaults to "openai".
+        - If the model is "azure/gpt-4", returns "azure".
+        - If Azure-specific parameters are provided (api_key, api_base, api_version), returns "azure".
+        - If there is no '/' and no Azure parameters, returns None.
         """
+        # Check if model explicitly has a provider prefix
         if "/" in self.model:
             return self.model.split("/")[0]
+
+        # Check if all Azure parameters are present
+        if self.api_key and self.api_base and self.api_version:
+            return "azure"
+
         return None
 
     def _validate_call_params(self) -> None:
@@ -842,7 +850,9 @@ class LLM:
         The custom_llm_provider is dynamically determined from the model:
         - E.g., "openrouter/deepseek/deepseek-chat" yields "openrouter"
         - "gemini/gemini-1.5-pro" yields "gemini"
-        - If no slash is present, "openai" is assumed.
+        - "azure/gpt-4" yields "azure"
+        - If Azure parameters (api_key, api_base, api_version) are present, "azure" is used
+        - If no slash is present and no Azure parameters, None is returned
         """
         provider = self._get_custom_llm_provider()
         if self.response_format is not None and not supports_response_schema(
diff --git a/test_azure_integration.py b/test_azure_integration.py
new file mode 100644
index 000000000..30d38c77f
--- /dev/null
+++ b/test_azure_integration.py
@@ -0,0 +1,37 @@
+# test_azure_integration.py
+from src.crewai.llm import LLM
+
+# Test with Azure parameters but without azure/ prefix
+llm = LLM(
+    api_key='test_key',
+    api_base='test_base',
+    model='gpt-4o-mini-2024-07-18',
+    api_version='test_version'
+)
+
+# Print the detected provider
+provider = llm._get_custom_llm_provider()
+print(f"Detected provider: {provider}")
+print(f"Is Azure detected correctly: {provider == 'azure'}")
+
+# Prepare parameters that would be passed to LiteLLM
+params = llm._prepare_completion_params(messages=[{"role": "user", "content": "test"}])
+print(f"Parameters passed to LiteLLM: {params}")
+
+# Test with Azure parameters and with azure/ prefix for comparison
+llm_with_prefix = LLM(
+    api_key='test_key',
+    api_base='test_base',
+    model='azure/gpt-4o-mini-2024-07-18',
+    api_version='test_version'
+)
+
+# Print the detected provider
+provider_with_prefix = llm_with_prefix._get_custom_llm_provider()
+print(f"\nWith azure/ prefix:")
+print(f"Detected provider: {provider_with_prefix}")
+print(f"Is Azure detected correctly: {provider_with_prefix == 'azure'}")
+
+# Prepare parameters that would be passed to LiteLLM
+params_with_prefix = llm_with_prefix._prepare_completion_params(messages=[{"role": "user", "content": "test"}])
+print(f"Parameters passed to LiteLLM: {params_with_prefix}")
diff --git a/tests/llm_test.py b/tests/llm_test.py
index c674b623b..32c4830aa 100644
--- a/tests/llm_test.py
+++ b/tests/llm_test.py
@@ -221,6 +221,20 @@ def test_get_custom_llm_provider_openai():
     llm = LLM(model="gpt-4")
     assert llm._get_custom_llm_provider() == None
 
+def test_get_custom_llm_provider_azure_with_prefix():
+    llm = LLM(model="azure/gpt-4")
+    assert llm._get_custom_llm_provider() == "azure"
+
+
+def test_get_custom_llm_provider_azure_without_prefix():
+    llm = LLM(
+        model="gpt-4",
+        api_key="test_key",
+        api_base="test_base",
+        api_version="test_version"
+    )
+    assert llm._get_custom_llm_provider() == "azure"
+
 
 def test_validate_call_params_supported():
     class DummyResponse(BaseModel):
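
Reviewer note: below is a minimal standalone sketch of the detection order this patch introduces. It mirrors the patched _get_custom_llm_provider logic for review purposes only; FakeLLM and get_custom_llm_provider are illustrative stand-ins, not names from the codebase.

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class FakeLLM:
        # Hypothetical stand-in for crewai's LLM; only the fields the
        # patched logic reads are modeled here.
        model: str
        api_key: Optional[str] = None
        api_base: Optional[str] = None
        api_version: Optional[str] = None

    def get_custom_llm_provider(llm: FakeLLM) -> Optional[str]:
        # 1. An explicit "provider/model" prefix always wins.
        if "/" in llm.model:
            return llm.model.split("/")[0]
        # 2. Otherwise, a complete set of Azure parameters implies "azure".
        if llm.api_key and llm.api_base and llm.api_version:
            return "azure"
        # 3. No prefix and no Azure parameters: return None and let
        #    LiteLLM infer the provider.
        return None

    assert get_custom_llm_provider(FakeLLM(model="azure/gpt-4")) == "azure"
    assert get_custom_llm_provider(
        FakeLLM(model="gpt-4", api_key="k", api_base="b", api_version="v")
    ) == "azure"
    assert get_custom_llm_provider(FakeLLM(model="gpt-4")) is None

Note the precedence this encodes: a "provider/" prefix beats parameter sniffing, so existing "azure/..." models behave exactly as before, and requiring all three Azure parameters means a bare api_key (as set by typical OpenAI-style configs) falls through to None rather than being misrouted to Azure.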