Compare commits

...

3 Commits

Author SHA1 Message Date
Devin AI
cf5f0a3553 fix: regenerate uv.lock to resolve TOML parse errors
- Remove corrupted uv.lock file that had missing version field
- Regenerate with uv sync to ensure proper dependency resolution

Co-Authored-By: João <joao@crewai.com>
2025-07-17 09:07:45 +00:00
Devin AI
bb19998fe7 feat: add extra_headers parameter to LLM class
- Add extra_headers parameter to LLM constructor for custom authentication headers
- Update _prepare_completion_params to pass extra_headers to LiteLLM
- Add comprehensive tests for extra_headers functionality
- Ensure backward compatibility with None default value

Fixes #3177

Co-Authored-By: João <joao@crewai.com>
2025-07-17 09:07:32 +00:00
Lucas Gomide
bf248d5118 docs: fix neatlogs documentation (#3171)
2025-07-16 21:18:04 -04:00
4 changed files with 3225 additions and 3261 deletions

View File

@@ -10,8 +10,6 @@ Neatlogs helps you **see what your agent did**, **why**, and **share it**.
It captures every step: thoughts, tool calls, responses, evaluations. No raw logs. Just clear, structured traces. Great for debugging and collaboration.
---
## Why use Neatlogs?
CrewAI agents use multiple tools and reasoning steps. When something goes wrong, you need context — not just errors.
@@ -37,8 +35,6 @@ The best UX to view a CrewAI trace. Post comments anywhere you want. Use AI to d
![Ai Chat Bot With A Trace](/images/neatlogs-4.png)
![Comments Drawer](/images/neatlogs-5.png)
---
## Core Features
- **Trace Viewer**: Track thoughts, tools, and decisions in sequence
@@ -49,8 +45,6 @@ The best UX to view a CrewAI trace. Post comments anywhere you want. Use AI to d
- **Ask the Trace (AI)**: Chat with your trace using Neatlogs AI bot
- **Public Sharing**: Publish trace links to your community
---
## Quick Setup with CrewAI
<Steps>
@@ -61,7 +55,7 @@ The best UX to view a CrewAI trace. Post comments anywhere you want. Use AI to d
```bash
pip install neatlogs
```
(Latest version 0.8.0, Python 3.8+; MIT license) :contentReference[oaicite:1]{index=1}
(Latest version 0.8.0, Python 3.8+; MIT license)
</Step>
<Step title="Initialize Neatlogs">
Before starting Crew agents, add:
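The snippet itself falls outside this hunk's context window. As a sketch only, assuming the init-style entry point that the "two lines of code" claim below refers to, with a placeholder key:

```python
import neatlogs  # assumption: the SDK exposes an init-style entry point

# Placeholder only: use the API key from https://app.neatlogs.com/
neatlogs.init("YOUR_NEATLOGS_API_KEY")
```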
@@ -76,18 +70,18 @@ The best UX to view a CrewAI trace. Post comments anywhere you want. Use AI to d
</Step>
</Steps>
---
## Under the Hood
According to GitHub, Neatlogs:
- Captures thoughts, tool calls, responses, errors, and token stats :contentReference[oaicite:2]{index=2}
- Supports AI-powered task generation and robust evaluation workflows :contentReference[oaicite:3]{index=3}
- Captures thoughts, tool calls, responses, errors, and token stats
- Supports AI-powered task generation and robust evaluation workflows
All with just two lines of code.
---
## Watch It Work
@@ -113,7 +107,7 @@ All with just two lines of code.
allowFullScreen
></iframe>
---
## Links & Support
@@ -121,9 +115,9 @@ All with just two lines of code.
- 🔐 [Dashboard & API Key](https://app.neatlogs.com/)
- 🐦 [Follow on Twitter](https://twitter.com/neatlogs)
- 📧 Contact: hello@neatlogs.com
- 🛠 [GitHub SDK](https://github.com/NeatLogs/neatlogs) :contentReference[oaicite:4]{index=4}
- 🛠 [GitHub SDK](https://github.com/NeatLogs/neatlogs)
---
## TL;DR

View File

@@ -311,6 +311,7 @@ class LLM(BaseLLM):
        callbacks: List[Any] = [],
        reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
        stream: bool = False,
        extra_headers: Optional[Dict[str, str]] = None,
        **kwargs,
    ):
        self.model = model
@@ -337,6 +338,7 @@ class LLM(BaseLLM):
        self.additional_params = kwargs
        self.is_anthropic = self._is_anthropic_model(model)
        self.stream = stream
        self.extra_headers = extra_headers
        litellm.drop_params = True
@@ -408,6 +410,7 @@ class LLM(BaseLLM):
            "stream": self.stream,
            "tools": tools,
            "reasoning_effort": self.reasoning_effort,
            "extra_headers": self.extra_headers,
            **self.additional_params,
        }
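Together, these hunks carry the new parameter from the constructor through `_prepare_completion_params` into the LiteLLM call. A minimal usage sketch (the model name and header values are the illustrative ones from the tests below, not required values):

```python
from crewai import LLM  # assumes the package's top-level export

# Headers are forwarded as-is to litellm.completion, e.g. for
# gateway or proxy authentication schemes.
llm = LLM(
    model="gpt-4o-mini",
    extra_headers={
        "X-Custom-Auth": "bearer token123",
        "X-API-Version": "v1.0",
    },
)

response = llm.call([{"role": "user", "content": "Hello, world!"}])
```

Because the default is `None`, existing `LLM(...)` call sites are unchanged.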

View File

@@ -509,6 +509,85 @@ def test_deepseek_r1_with_open_router():
    assert "Paris" in result


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_passes_extra_headers():
    """Test that extra_headers parameter is passed to litellm.completion."""
    extra_headers = {
        "X-Custom-Auth": "bearer token123",
        "X-API-Version": "v1.0"
    }
    llm = LLM(
        model="gpt-4o-mini",
        extra_headers=extra_headers,
    )

    messages = [{"role": "user", "content": "Hello, world!"}]

    with patch("litellm.completion") as mocked_completion:
        mock_message = MagicMock()
        mock_message.content = "Test response"
        mock_choice = MagicMock()
        mock_choice.message = mock_message
        mock_response = MagicMock()
        mock_response.choices = [mock_choice]
        mock_response.usage = {
            "prompt_tokens": 5,
            "completion_tokens": 5,
            "total_tokens": 10,
        }
        mocked_completion.return_value = mock_response

        result = llm.call(messages)

        mocked_completion.assert_called_once()
        _, kwargs = mocked_completion.call_args
        assert kwargs["extra_headers"] == extra_headers
        assert kwargs["model"] == "gpt-4o-mini"
        assert kwargs["messages"] == messages
        assert result == "Test response"


def test_llm_extra_headers_none_by_default():
    """Test that extra_headers defaults to None and doesn't break existing functionality."""
    llm = LLM(model="gpt-4o-mini")
    messages = [{"role": "user", "content": "Hello, world!"}]

    with patch("litellm.completion") as mocked_completion:
        mock_message = MagicMock()
        mock_message.content = "Test response"
        mock_choice = MagicMock()
        mock_choice.message = mock_message
        mock_response = MagicMock()
        mock_response.choices = [mock_choice]
        mock_response.usage = {
            "prompt_tokens": 5,
            "completion_tokens": 5,
            "total_tokens": 10,
        }
        mocked_completion.return_value = mock_response

        result = llm.call(messages)

        mocked_completion.assert_called_once()
        _, kwargs = mocked_completion.call_args
        assert "extra_headers" not in kwargs
        assert kwargs["model"] == "gpt-4o-mini"
        assert kwargs["messages"] == messages
        assert result == "Test response"


def assert_event_count(
    mock_emit,
    expected_completed_tool_call: int = 0,

uv.lock · generated · 6382 lines changed

File diff suppressed because it is too large.