Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-30 10:38:14 +00:00
Compare commits: 0.148.0...devin/1752 (3 commits)

Commits:
- cf5f0a3553
- bb19998fe7
- bf248d5118
@@ -10,8 +10,6 @@ Neatlogs helps you **see what your agent did**, **why**, and **share it**.

It captures every step: thoughts, tool calls, responses, evaluations. No raw logs. Just clear, structured traces. Great for debugging and collaboration.

---

## Why use Neatlogs?

CrewAI agents use multiple tools and reasoning steps. When something goes wrong, you need context, not just errors.
@@ -37,8 +35,6 @@ The best UX to view a CrewAI trace. Post comments anywhere you want. Use AI to d

![Neatlogs Trace view](../../../images/neatlogs1.png)

![Neatlogs comment feature](../../../images/neatlogs-comment.png)

---

## Core Features

- **Trace Viewer**: Track thoughts, tools, and decisions in sequence
@@ -49,8 +45,6 @@ The best UX to view a CrewAI trace. Post comments anywhere you want. Use AI to d

- **Ask the Trace (AI)**: Chat with your trace using Neatlogs AI bot
- **Public Sharing**: Publish trace links to your community

---

## Quick Setup with CrewAI

<Steps>
@@ -61,7 +55,7 @@ The best UX to view a CrewAI trace. Post comments anywhere you want. Use AI to d
```bash
pip install neatlogs
```
(Latest version 0.8.0, Python 3.8+; MIT license)
</Step>
<Step title="Initialize Neatlogs">
Before starting Crew agents, add:
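
The snippet itself falls outside this hunk's context, but the "two lines of code" mentioned below suggest an import plus an init call. A minimal sketch, assuming the SDK exposes a top-level `init` that takes your project API key (check the Neatlogs docs for the exact signature):

```python
import neatlogs

# Hypothetical call shape; get your key from https://app.neatlogs.com/
neatlogs.init("YOUR_NEATLOGS_API_KEY")
```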
@@ -76,18 +70,18 @@ The best UX to view a CrewAI trace. Post comments anywhere you want. Use AI to d
</Step>
</Steps>

---

## Under the Hood

According to GitHub, Neatlogs:

- Captures thoughts, tool calls, responses, errors, and token stats
- Supports AI-powered task generation and robust evaluation workflows

All with just two lines of code.

---

## Watch It Work

@@ -113,7 +107,7 @@ All with just two lines of code.
  allowFullScreen
></iframe>

---

## Links & Support
@@ -121,9 +115,9 @@ All with just two lines of code.
- 🔐 [Dashboard & API Key](https://app.neatlogs.com/)
- 🐦 [Follow on Twitter](https://twitter.com/neatlogs)
- 📧 Contact: hello@neatlogs.com
- 🛠 [GitHub SDK](https://github.com/NeatLogs/neatlogs)

---

## TL;DR
@@ -311,6 +311,7 @@ class LLM(BaseLLM):
        callbacks: List[Any] = [],
        reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
        stream: bool = False,
        extra_headers: Optional[Dict[str, str]] = None,
        **kwargs,
    ):
        self.model = model
@@ -337,6 +338,7 @@ class LLM(BaseLLM):
        self.additional_params = kwargs
        self.is_anthropic = self._is_anthropic_model(model)
        self.stream = stream
        self.extra_headers = extra_headers

        litellm.drop_params = True
@@ -408,6 +410,7 @@ class LLM(BaseLLM):
            "stream": self.stream,
            "tools": tools,
            "reasoning_effort": self.reasoning_effort,
            "extra_headers": self.extra_headers,
            **self.additional_params,
        }
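
Together these hunks thread a user-supplied header dict from the constructor into every `litellm.completion` call. A hedged usage sketch; the model name and header value below are illustrative (taken from the tests), not prescribed by the PR:

```python
from crewai import LLM

# Any headers the underlying provider accepts can go here; litellm
# forwards the dict with each completion request.
llm = LLM(
    model="gpt-4o-mini",
    extra_headers={"X-Custom-Auth": "bearer token123"},
)

print(llm.call([{"role": "user", "content": "Hello, world!"}]))
```

Defaulting `extra_headers` to `None` keeps the key out of the completion kwargs entirely, which is exactly what the second test below asserts.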

@@ -509,6 +509,85 @@ def test_deepseek_r1_with_open_router():
    assert "Paris" in result


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_passes_extra_headers():
    """Test that extra_headers parameter is passed to litellm.completion."""
    extra_headers = {
        "X-Custom-Auth": "bearer token123",
        "X-API-Version": "v1.0",
    }

    llm = LLM(
        model="gpt-4o-mini",
        extra_headers=extra_headers,
    )

    messages = [{"role": "user", "content": "Hello, world!"}]

    with patch("litellm.completion") as mocked_completion:
        mock_message = MagicMock()
        mock_message.content = "Test response"
        mock_choice = MagicMock()
        mock_choice.message = mock_message
        mock_response = MagicMock()
        mock_response.choices = [mock_choice]
        mock_response.usage = {
            "prompt_tokens": 5,
            "completion_tokens": 5,
            "total_tokens": 10,
        }

        mocked_completion.return_value = mock_response

        result = llm.call(messages)

        mocked_completion.assert_called_once()
        _, kwargs = mocked_completion.call_args

        assert kwargs["extra_headers"] == extra_headers
        assert kwargs["model"] == "gpt-4o-mini"
        assert kwargs["messages"] == messages
        assert result == "Test response"


def test_llm_extra_headers_none_by_default():
    """Test that extra_headers defaults to None and doesn't break existing functionality."""
    llm = LLM(model="gpt-4o-mini")

    messages = [{"role": "user", "content": "Hello, world!"}]

    with patch("litellm.completion") as mocked_completion:
        mock_message = MagicMock()
        mock_message.content = "Test response"
        mock_choice = MagicMock()
        mock_choice.message = mock_message
        mock_response = MagicMock()
        mock_response.choices = [mock_choice]
        mock_response.usage = {
            "prompt_tokens": 5,
            "completion_tokens": 5,
            "total_tokens": 10,
        }

        mocked_completion.return_value = mock_response

        result = llm.call(messages)

        mocked_completion.assert_called_once()
        _, kwargs = mocked_completion.call_args

        assert "extra_headers" not in kwargs
        assert kwargs["model"] == "gpt-4o-mini"
        assert kwargs["messages"] == messages
        assert result == "Test response"


def assert_event_count(
    mock_emit,
    expected_completed_tool_call: int = 0,