Compare commits

...

2 Commits

Author SHA1 Message Date
Devin AI
cf5f0a3553 fix: regenerate uv.lock to resolve TOML parse errors
- Remove corrupted uv.lock file that had missing version field
- Regenerate with uv sync to ensure proper dependency resolution

Co-Authored-By: Jo\u00E3o <joao@crewai.com>
2025-07-17 09:07:45 +00:00
Devin AI
bb19998fe7 feat: add extra_headers parameter to LLM class
- Add extra_headers parameter to LLM constructor for custom authentication headers
- Update _prepare_completion_params to pass extra_headers to LiteLLM
- Add comprehensive tests for extra_headers functionality
- Ensure backward compatibility with None default value

Fixes #3177

Co-Authored-By: Jo\u00E3o <joao@crewai.com>
2025-07-17 09:07:32 +00:00
3 changed files with 3217 additions and 3247 deletions

View File

@@ -311,6 +311,7 @@ class LLM(BaseLLM):
callbacks: List[Any] = [],
reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
stream: bool = False,
extra_headers: Optional[Dict[str, str]] = None,
**kwargs,
):
self.model = model
@@ -337,6 +338,7 @@ class LLM(BaseLLM):
self.additional_params = kwargs
self.is_anthropic = self._is_anthropic_model(model)
self.stream = stream
self.extra_headers = extra_headers
litellm.drop_params = True
@@ -408,6 +410,7 @@ class LLM(BaseLLM):
"stream": self.stream,
"tools": tools,
"reasoning_effort": self.reasoning_effort,
"extra_headers": self.extra_headers,
**self.additional_params,
}

View File

@@ -509,6 +509,85 @@ def test_deepseek_r1_with_open_router():
assert "Paris" in result
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_passes_extra_headers():
"""Test that extra_headers parameter is passed to litellm.completion."""
extra_headers = {
"X-Custom-Auth": "bearer token123",
"X-API-Version": "v1.0"
}
llm = LLM(
model="gpt-4o-mini",
extra_headers=extra_headers,
)
messages = [{"role": "user", "content": "Hello, world!"}]
with patch("litellm.completion") as mocked_completion:
mock_message = MagicMock()
mock_message.content = "Test response"
mock_choice = MagicMock()
mock_choice.message = mock_message
mock_response = MagicMock()
mock_response.choices = [mock_choice]
mock_response.usage = {
"prompt_tokens": 5,
"completion_tokens": 5,
"total_tokens": 10,
}
mocked_completion.return_value = mock_response
result = llm.call(messages)
mocked_completion.assert_called_once()
_, kwargs = mocked_completion.call_args
assert kwargs["extra_headers"] == extra_headers
assert kwargs["model"] == "gpt-4o-mini"
assert kwargs["messages"] == messages
assert result == "Test response"
def test_llm_extra_headers_none_by_default():
"""Test that extra_headers defaults to None and doesn't break existing functionality."""
llm = LLM(model="gpt-4o-mini")
messages = [{"role": "user", "content": "Hello, world!"}]
with patch("litellm.completion") as mocked_completion:
mock_message = MagicMock()
mock_message.content = "Test response"
mock_choice = MagicMock()
mock_choice.message = mock_message
mock_response = MagicMock()
mock_response.choices = [mock_choice]
mock_response.usage = {
"prompt_tokens": 5,
"completion_tokens": 5,
"total_tokens": 10,
}
mocked_completion.return_value = mock_response
result = llm.call(messages)
mocked_completion.assert_called_once()
_, kwargs = mocked_completion.call_args
assert "extra_headers" not in kwargs
assert kwargs["model"] == "gpt-4o-mini"
assert kwargs["messages"] == messages
assert result == "Test response"
def assert_event_count(
mock_emit,
expected_completed_tool_call: int = 0,

6382
uv.lock generated

File diff suppressed because it is too large Load Diff