From bb19998fe74d40f265bdf240dec4e59e6a4efb4b Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 09:07:32 +0000 Subject: [PATCH] feat: add extra_headers parameter to LLM class - Add extra_headers parameter to LLM constructor for custom authentication headers - Update _prepare_completion_params to pass extra_headers to LiteLLM - Add comprehensive tests for extra_headers functionality - Ensure backward compatibility with None default value Fixes #3177 Co-Authored-By: João --- src/crewai/llm.py | 3 ++ tests/llm_test.py | 79 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/src/crewai/llm.py b/src/crewai/llm.py index d6f40a09a..9e897781c 100644 --- a/src/crewai/llm.py +++ b/src/crewai/llm.py @@ -311,6 +311,7 @@ class LLM(BaseLLM): callbacks: List[Any] = [], reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None, stream: bool = False, + extra_headers: Optional[Dict[str, str]] = None, **kwargs, ): self.model = model @@ -337,6 +338,7 @@ class LLM(BaseLLM): self.additional_params = kwargs self.is_anthropic = self._is_anthropic_model(model) self.stream = stream + self.extra_headers = extra_headers litellm.drop_params = True @@ -408,6 +410,7 @@ class LLM(BaseLLM): "stream": self.stream, "tools": tools, "reasoning_effort": self.reasoning_effort, + "extra_headers": self.extra_headers, **self.additional_params, } diff --git a/tests/llm_test.py b/tests/llm_test.py index 1065876af..f8068f412 100644 --- a/tests/llm_test.py +++ b/tests/llm_test.py @@ -509,6 +509,85 @@ def test_deepseek_r1_with_open_router(): assert "Paris" in result +@pytest.mark.vcr(filter_headers=["authorization"]) +def test_llm_passes_extra_headers(): + """Test that extra_headers parameter is passed to litellm.completion.""" + extra_headers = { + "X-Custom-Auth": "bearer token123", + "X-API-Version": "v1.0" + } + + llm = LLM( + model="gpt-4o-mini", + 
extra_headers=extra_headers, + ) + + messages = [{"role": "user", "content": "Hello, world!"}] + + with patch("litellm.completion") as mocked_completion: + mock_message = MagicMock() + mock_message.content = "Test response" + mock_choice = MagicMock() + mock_choice.message = mock_message + mock_response = MagicMock() + mock_response.choices = [mock_choice] + mock_response.usage = { + "prompt_tokens": 5, + "completion_tokens": 5, + "total_tokens": 10, + } + + mocked_completion.return_value = mock_response + + result = llm.call(messages) + + mocked_completion.assert_called_once() + + _, kwargs = mocked_completion.call_args + + assert kwargs["extra_headers"] == extra_headers + + assert kwargs["model"] == "gpt-4o-mini" + assert kwargs["messages"] == messages + + assert result == "Test response" + + +def test_llm_extra_headers_none_by_default(): + """Test that extra_headers defaults to None and doesn't break existing functionality.""" + llm = LLM(model="gpt-4o-mini") + + messages = [{"role": "user", "content": "Hello, world!"}] + + with patch("litellm.completion") as mocked_completion: + mock_message = MagicMock() + mock_message.content = "Test response" + mock_choice = MagicMock() + mock_choice.message = mock_message + mock_response = MagicMock() + mock_response.choices = [mock_choice] + mock_response.usage = { + "prompt_tokens": 5, + "completion_tokens": 5, + "total_tokens": 10, + } + + mocked_completion.return_value = mock_response + + result = llm.call(messages) + + mocked_completion.assert_called_once() + + _, kwargs = mocked_completion.call_args + + assert "extra_headers" not in kwargs + + assert kwargs["model"] == "gpt-4o-mini" + assert kwargs["messages"] == messages + + assert result == "Test response" + + def assert_event_count( mock_emit, expected_completed_tool_call: int = 0,