fix: strip whitespace from API keys and handle AuthenticationError with guidance

Fixes #5622

- Strip whitespace/newlines from OPENAI_API_KEY when read from env vars
  or passed directly (in _normalize_openai_fields and _get_client_params)
- Strip whitespace from env var values in llm_utils.py fallback path
- Add specific AuthenticationError handling in all OpenAI completion
  methods (sync/async, completions/responses) with troubleshooting
  guidance for users
- Let AuthenticationError propagate through call()/acall() without
  being swallowed by the generic Exception handler
- Add comprehensive tests covering whitespace stripping and auth error
  handling

Co-Authored-By: João <joao@crewai.com>
This commit is contained in:
Devin AI
2026-04-25 19:34:01 +00:00
parent cb46a1c4ba
commit 47b0081333
4 changed files with 209 additions and 3 deletions

View File

@@ -8,7 +8,14 @@ import os
from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypedDict
import httpx
from openai import APIConnectionError, AsyncOpenAI, NotFoundError, OpenAI, Stream
from openai import (
APIConnectionError,
AsyncOpenAI,
AuthenticationError,
NotFoundError,
OpenAI,
Stream,
)
from openai.lib.streaming.chat import ChatCompletionStream
from openai.types.chat import (
ChatCompletion,
@@ -246,7 +253,8 @@ class OpenAICompletion(BaseLLM):
return data
if not data.get("provider"):
data["provider"] = "openai"
data["api_key"] = data.get("api_key") or os.getenv("OPENAI_API_KEY")
raw_key = data.get("api_key") or os.getenv("OPENAI_API_KEY")
data["api_key"] = raw_key.strip() if isinstance(raw_key, str) else raw_key
# Extract api_base from kwargs if present
if "api_base" not in data:
data["api_base"] = None
@@ -363,7 +371,8 @@ class OpenAICompletion(BaseLLM):
"""Get OpenAI client parameters."""
if self.api_key is None:
self.api_key = os.getenv("OPENAI_API_KEY")
raw_key = os.getenv("OPENAI_API_KEY")
self.api_key = raw_key.strip() if isinstance(raw_key, str) else raw_key
if self.api_key is None:
raise ValueError("OPENAI_API_KEY is required")
@@ -388,6 +397,18 @@ class OpenAICompletion(BaseLLM):
return client_params
@staticmethod
def _format_auth_error(e: AuthenticationError) -> str:
    """Build a user-facing message for a failed OpenAI authentication.

    Combines the original error text with concrete troubleshooting steps
    so users can quickly diagnose a missing, stale, or malformed API key
    (see issue #5622: keys padded with whitespace/newlines).

    Args:
        e: The ``AuthenticationError`` raised by the OpenAI client.

    Returns:
        A multi-line message: the error summary followed by the checklist.
    """
    checklist = "\n".join(
        (
            "Troubleshooting steps:",
            " 1. Verify OPENAI_API_KEY is set correctly in your environment or .env file",
            " 2. Ensure the key has no extra whitespace or quotes",
            " 3. Confirm the key is still active at https://platform.openai.com/api-keys",
            " 4. If using a .env file, ensure it is in your project root and contains the correct key",
        )
    )
    return f"Authentication failed for OpenAI API: {e}\n{checklist}"
def call(
self,
messages: str | list[LLMMessage],
@@ -449,6 +470,8 @@ class OpenAICompletion(BaseLLM):
response_model=response_model,
)
except AuthenticationError:
raise
except Exception as e:
error_msg = f"OpenAI API call failed: {e!s}"
logging.error(error_msg)
@@ -544,6 +567,8 @@ class OpenAICompletion(BaseLLM):
response_model=response_model,
)
except AuthenticationError:
raise
except Exception as e:
error_msg = f"OpenAI API call failed: {e!s}"
logging.error(error_msg)
@@ -915,6 +940,13 @@ class OpenAICompletion(BaseLLM):
params.get("input", []), content, from_agent
)
except AuthenticationError as e:
error_msg = self._format_auth_error(e)
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise
except NotFoundError as e:
error_msg = f"Model {self.model} not found: {e}"
logging.error(error_msg)
@@ -1049,6 +1081,13 @@ class OpenAICompletion(BaseLLM):
usage=usage,
)
except AuthenticationError as e:
error_msg = self._format_auth_error(e)
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise
except NotFoundError as e:
error_msg = f"Model {self.model} not found: {e}"
logging.error(error_msg)
@@ -1717,6 +1756,13 @@ class OpenAICompletion(BaseLLM):
content = self._invoke_after_llm_call_hooks(
params["messages"], content, from_agent
)
except AuthenticationError as e:
error_msg = self._format_auth_error(e)
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise
except NotFoundError as e:
error_msg = f"Model {self.model} not found: {e}"
logging.error(error_msg)
@@ -2106,6 +2152,13 @@ class OpenAICompletion(BaseLLM):
if usage.get("total_tokens", 0) > 0:
logging.info(f"OpenAI API usage: {usage}")
except AuthenticationError as e:
error_msg = self._format_auth_error(e)
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise
except NotFoundError as e:
error_msg = f"Model {self.model} not found: {e}"
logging.error(error_msg)

View File

@@ -158,6 +158,7 @@ def _llm_via_environment_or_fallback() -> LLM | None:
if key_name and key_name not in unaccepted_attributes:
env_value = os.environ.get(key_name)
if env_value:
env_value = env_value.strip()
# Map environment variable names to recognized parameters
param_key = _normalize_key_name(key_name.lower())
llm_params[param_key] = env_value

View File

@@ -2100,3 +2100,134 @@ def test_openai_no_detail_fields_omitted():
assert usage["completion_tokens"] == 30
assert "cached_prompt_tokens" not in usage
assert "reasoning_tokens" not in usage
class TestOpenAIApiKeyHandling:
    """Regression tests for API key normalization and auth error reporting.

    Exercises the fix for issue #5622, where an OPENAI_API_KEY that worked
    outside CrewAI failed inside it because of stray whitespace/newlines or
    unhelpful authentication errors.
    """

    @staticmethod
    def _build_auth_error(message: str):
        """Construct an ``openai.AuthenticationError`` backed by a fake 401 response."""
        import httpx
        from openai import AuthenticationError

        response = httpx.Response(
            status_code=401,
            request=httpx.Request("POST", "https://api.openai.com/v1/chat/completions"),
            json={"error": {"message": message, "type": "invalid_api_key"}},
        )
        return AuthenticationError(
            message=message,
            response=response,
            body={"error": {"message": message}},
        )

    def test_api_key_whitespace_stripped_from_env(self):
        """Surrounding spaces in the env var must not survive normalization."""
        with patch.dict(os.environ, {"OPENAI_API_KEY": " sk-test-key-123 "}, clear=False):
            instance = LLM(model="openai/gpt-4o")
            assert instance.api_key == "sk-test-key-123"

    def test_api_key_newline_stripped_from_env(self):
        """A trailing newline in the env var must be removed."""
        with patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-key-123\n"}, clear=False):
            instance = LLM(model="openai/gpt-4o")
            assert instance.api_key == "sk-test-key-123"

    def test_api_key_tabs_stripped_from_env(self):
        """Tab characters around the env var value must be removed."""
        with patch.dict(os.environ, {"OPENAI_API_KEY": "\tsk-test-key-123\t"}, clear=False):
            instance = LLM(model="openai/gpt-4o")
            assert instance.api_key == "sk-test-key-123"

    def test_api_key_passed_directly_whitespace_stripped(self):
        """Keys passed via the constructor are normalized the same way."""
        instance = LLM(model="openai/gpt-4o", api_key=" sk-direct-key ")
        assert instance.api_key == "sk-direct-key"

    def test_api_key_no_whitespace_unchanged(self):
        """An already-clean key passes through untouched."""
        instance = LLM(model="openai/gpt-4o", api_key="sk-clean-key")
        assert instance.api_key == "sk-clean-key"

    def test_api_key_whitespace_stripped_in_get_client_params(self):
        """_get_client_params strips whitespace when it falls back to the env var."""
        instance = LLM(model="openai/gpt-4o", api_key="sk-test")
        # Clear the key so _get_client_params takes the env-var fallback path.
        instance.api_key = None
        with patch.dict(os.environ, {"OPENAI_API_KEY": " sk-env-key "}, clear=False):
            client_params = instance._get_client_params()
            assert client_params["api_key"] == "sk-env-key"

    def test_api_key_missing_raises_value_error(self):
        """A missing OPENAI_API_KEY surfaces as a clear ValueError."""
        instance = LLM(model="openai/gpt-4o", api_key="sk-test")
        instance.api_key = None
        with patch.dict(os.environ, {}, clear=True):
            with pytest.raises(ValueError, match="OPENAI_API_KEY is required"):
                instance._get_client_params()

    def test_authentication_error_provides_troubleshooting_guidance(self):
        """AuthenticationError from the client propagates out of call()."""
        from openai import AuthenticationError

        instance = LLM(model="openai/gpt-4o", api_key="sk-invalid-key")
        failure = self._build_auth_error("Incorrect API key provided")
        with patch.object(instance, "_get_sync_client") as fake_client:
            fake_client.return_value.chat.completions.create.side_effect = failure
            with pytest.raises(AuthenticationError) as exc_info:
                instance.call(messages=[{"role": "user", "content": "Hello"}])
            assert "Incorrect API key" in str(exc_info.value)

    def test_authentication_error_logged_with_troubleshooting(self):
        """The troubleshooting guidance is written to the error log."""
        from openai import AuthenticationError

        instance = LLM(model="openai/gpt-4o", api_key="sk-invalid-key")
        failure = self._build_auth_error("Incorrect API key provided")
        with patch.object(instance, "_get_sync_client") as fake_client:
            fake_client.return_value.chat.completions.create.side_effect = failure
            with patch("crewai.llms.providers.openai.completion.logging") as fake_logging:
                with pytest.raises(AuthenticationError):
                    instance.call(messages=[{"role": "user", "content": "Hello"}])
                logged = fake_logging.error.call_args[0][0]
                assert "Troubleshooting steps" in logged
                assert "OPENAI_API_KEY" in logged
                assert ".env" in logged

    def test_format_auth_error_message_content(self):
        """_format_auth_error includes every troubleshooting hint we promise."""
        formatted = OpenAICompletion._format_auth_error(
            self._build_auth_error("Invalid key")
        )
        assert "Authentication failed for OpenAI API" in formatted
        assert "OPENAI_API_KEY" in formatted
        assert "whitespace" in formatted
        assert "platform.openai.com/api-keys" in formatted
        assert ".env" in formatted

View File

@@ -138,3 +138,24 @@ def test_create_llm_anthropic_missing_dependency() -> None:
create_llm(llm_value="anthropic/claude-3-sonnet")
assert "Anthropic native provider not available, to install: uv add \"crewai[anthropic]\"" in str(exc_info.value)
def test_env_var_api_key_whitespace_stripped() -> None:
    """API keys sourced from environment variables are stripped of whitespace.

    Regression test for issue #5622: values padded with spaces broke auth.
    """
    with patch.dict(os.environ, {"OPENAI_API_KEY": " sk-test-key "}, clear=True):
        created = create_llm(llm_value=None)
        assert created is not None
        assert isinstance(created, BaseLLM)
        assert created.api_key == "sk-test-key"
def test_env_var_api_key_newline_stripped() -> None:
    """Trailing newlines in environment-sourced API keys are removed."""
    with patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-key\n"}, clear=True):
        created = create_llm(llm_value=None)
        assert created is not None
        assert isinstance(created, BaseLLM)
        assert created.api_key == "sk-test-key"