Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-04-12 05:52:39 +00:00
Compare commits
2 Commits
devin/1752...lg-evaluat
| Author | SHA1 | Date |
|---|---|---|
|  | 8a3a05bf7f |  |
|  | a56bfa3c2c |  |
@@ -9,7 +9,12 @@
   },
   "favicon": "/images/favicon.svg",
   "contextual": {
-    "options": ["copy", "view", "chatgpt", "claude"]
+    "options": [
+      "copy",
+      "view",
+      "chatgpt",
+      "claude"
+    ]
   },
   "navigation": {
     "languages": [
@@ -50,22 +55,32 @@
       "groups": [
         {
           "group": "Get Started",
-          "pages": ["en/introduction", "en/installation", "en/quickstart"]
+          "pages": [
+            "en/introduction",
+            "en/installation",
+            "en/quickstart"
+          ]
         },
         {
           "group": "Guides",
           "pages": [
             {
               "group": "Strategy",
-              "pages": ["en/guides/concepts/evaluating-use-cases"]
+              "pages": [
+                "en/guides/concepts/evaluating-use-cases"
+              ]
             },
             {
               "group": "Agents",
-              "pages": ["en/guides/agents/crafting-effective-agents"]
+              "pages": [
+                "en/guides/agents/crafting-effective-agents"
+              ]
             },
             {
               "group": "Crews",
-              "pages": ["en/guides/crews/first-crew"]
+              "pages": [
+                "en/guides/crews/first-crew"
+              ]
             },
             {
               "group": "Flows",
@@ -79,6 +94,7 @@
           "pages": [
             "en/guides/advanced/customizing-prompts",
             "en/guides/advanced/fingerprinting"
+
           ]
         }
       ]
@@ -225,7 +241,6 @@
             "en/observability/langtrace",
             "en/observability/maxim",
             "en/observability/mlflow",
-            "en/observability/neatlogs",
             "en/observability/openlit",
             "en/observability/opik",
             "en/observability/patronus-evaluation",
@@ -259,7 +274,9 @@
       },
       {
         "group": "Telemetry",
-        "pages": ["en/telemetry"]
+        "pages": [
+          "en/telemetry"
+        ]
       }
     ]
   },
@@ -268,7 +285,9 @@
       "groups": [
         {
           "group": "Getting Started",
-          "pages": ["en/enterprise/introduction"]
+          "pages": [
+            "en/enterprise/introduction"
+          ]
         },
         {
           "group": "Features",
@@ -323,7 +342,9 @@
       },
       {
         "group": "Resources",
-        "pages": ["en/enterprise/resources/frequently-asked-questions"]
+        "pages": [
+          "en/enterprise/resources/frequently-asked-questions"
+        ]
       }
     ]
   },
@@ -332,7 +353,9 @@
       "groups": [
         {
           "group": "Getting Started",
-          "pages": ["en/api-reference/introduction"]
+          "pages": [
+            "en/api-reference/introduction"
+          ]
         },
         {
           "group": "Endpoints",
@@ -342,13 +365,16 @@
       },
       {
         "tab": "Examples",
-        "groups": [
+        "groups": [
           {
             "group": "Examples",
-            "pages": ["en/examples/example"]
+            "pages": [
+              "en/examples/example"
+            ]
           }
         ]
       }
+
     ]
   },
   {
@@ -399,15 +425,21 @@
           "pages": [
             {
               "group": "Estratégia",
-              "pages": ["pt-BR/guides/concepts/evaluating-use-cases"]
+              "pages": [
+                "pt-BR/guides/concepts/evaluating-use-cases"
+              ]
             },
             {
               "group": "Agentes",
-              "pages": ["pt-BR/guides/agents/crafting-effective-agents"]
+              "pages": [
+                "pt-BR/guides/agents/crafting-effective-agents"
+              ]
             },
             {
               "group": "Crews",
-              "pages": ["pt-BR/guides/crews/first-crew"]
+              "pages": [
+                "pt-BR/guides/crews/first-crew"
+              ]
             },
             {
               "group": "Flows",
@@ -600,7 +632,9 @@
       },
       {
         "group": "Telemetria",
-        "pages": ["pt-BR/telemetry"]
+        "pages": [
+          "pt-BR/telemetry"
+        ]
       }
     ]
   },
@@ -609,7 +643,9 @@
       "groups": [
         {
           "group": "Começando",
-          "pages": ["pt-BR/enterprise/introduction"]
+          "pages": [
+            "pt-BR/enterprise/introduction"
+          ]
         },
         {
           "group": "Funcionalidades",
@@ -674,7 +710,9 @@
       "groups": [
         {
           "group": "Começando",
-          "pages": ["pt-BR/api-reference/introduction"]
+          "pages": [
+            "pt-BR/api-reference/introduction"
+          ]
         },
         {
           "group": "Endpoints",
@@ -684,13 +722,16 @@
       },
       {
         "tab": "Exemplos",
-        "groups": [
+        "groups": [
           {
             "group": "Exemplos",
-            "pages": ["pt-BR/examples/example"]
+            "pages": [
+              "pt-BR/examples/example"
+            ]
           }
         ]
       }
+
     ]
   }
 ]
@@ -1,140 +0,0 @@
---
title: Neatlogs Integration
description: Understand, debug, and share your CrewAI agent runs
icon: magnifying-glass-chart
---

# Introduction

Neatlogs helps you **see what your agent did**, understand **why**, and **share it**.

It captures every step: thoughts, tool calls, responses, evaluations. No raw logs. Just clear, structured traces. Great for debugging and collaboration.

---

## Why use Neatlogs?

CrewAI agents use multiple tools and reasoning steps. When something goes wrong, you need context, not just errors.

Neatlogs lets you:

- Follow the full decision path
- Add feedback directly on steps
- Chat with the trace using an AI assistant
- Share runs publicly for feedback
- Turn insights into tasks

All in one place.

Manage your traces effortlessly:




The best UX for viewing a CrewAI trace. Post comments anywhere you want. Use AI to debug.





---

## Core Features

- **Trace Viewer**: Track thoughts, tools, and decisions in sequence
- **Inline Comments**: Tag teammates on any trace step
- **Feedback & Evaluation**: Mark outputs as correct or incorrect
- **Error Highlighting**: Automatic flagging of API/tool failures
- **Task Conversion**: Convert comments into assigned tasks
- **Ask the Trace (AI)**: Chat with your trace using the Neatlogs AI bot
- **Public Sharing**: Publish trace links to your community

---

## Quick Setup with CrewAI

<Steps>
<Step title="Sign Up & Get API Key">
Visit [neatlogs.com](https://neatlogs.com/?utm_source=crewAI-docs), create a project, and copy the API key.
</Step>
<Step title="Install SDK">
```bash
pip install neatlogs
```
(Latest version 0.8.0, Python 3.8+; MIT license)
</Step>
<Step title="Initialize Neatlogs">
Before starting your Crew agents, add:

```python
import neatlogs

neatlogs.init("YOUR_PROJECT_API_KEY")
```

Agents run as usual. Neatlogs captures everything automatically.
</Step>
</Steps>
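For reference, here is a minimal end-to-end sketch of the two pieces together. It assumes your LLM credentials are already set in the environment; the agent, task, and project key below are illustrative placeholders, not values prescribed by the integration.

```python
import neatlogs
from crewai import Agent, Crew, Task

# Initialize tracing first, before any agents start (hypothetical project key).
neatlogs.init("YOUR_PROJECT_API_KEY")

# A deliberately small crew: one agent, one task.
researcher = Agent(
    role="Researcher",
    goal="Summarize a topic in two sentences",
    backstory="A concise research assistant.",
)

task = Task(
    description="Summarize why trace observability matters for AI agents.",
    expected_output="A two-sentence summary.",
    agent=researcher,
)

crew = Crew(agents=[researcher], tasks=[task])
result = crew.kickoff()  # the full run appears as a trace in the Neatlogs dashboard
print(result)
```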
---

## Under the Hood

According to the project's GitHub repository, Neatlogs:

- Captures thoughts, tool calls, responses, errors, and token stats
- Supports AI-powered task generation and robust evaluation workflows

All with just two lines of code.

---

## Watch It Work

### 🔍 Full Demo (4 min)

<iframe
  width="100%"
  height="315"
  src="https://www.youtube.com/embed/8KDme9T2I7Q?si=b8oHteaBwFNs_Duk"
  title="YouTube video player"
  frameBorder="0"
  allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
  allowFullScreen
></iframe>

### ⚙️ CrewAI Integration (30 s)

<iframe
  className="w-full aspect-video rounded-xl"
  src="https://www.loom.com/embed/9c78b552af43452bb3e4783cb8d91230?sid=e9d7d370-a91a-49b0-809e-2f375d9e801d"
  title="Loom video player"
  frameBorder="0"
  allowFullScreen
></iframe>

---

## Links & Support

- 📘 [Neatlogs Docs](https://docs.neatlogs.com/)
- 🔐 [Dashboard & API Key](https://app.neatlogs.com/)
- 🐦 [Follow on Twitter](https://twitter.com/neatlogs)
- 📧 Contact: hello@neatlogs.com
- 🛠 [GitHub SDK](https://github.com/NeatLogs/neatlogs)

---

## TL;DR

With just:

```bash
pip install neatlogs
```

```python
import neatlogs

neatlogs.init("YOUR_API_KEY")
```

you can capture, understand, share, and act on your CrewAI agent runs in seconds. No setup overhead. Full trace transparency. Full team collaboration.
Binary file not shown. (Before: 222 KiB)
Binary file not shown. (Before: 329 KiB)
Binary file not shown. (Before: 590 KiB)
Binary file not shown. (Before: 216 KiB)
Binary file not shown. (Before: 277 KiB)
@@ -1,107 +0,0 @@
#!/usr/bin/env python3
"""
Reproduction script for issue #3165: LLM Failed with Custom OpenAI-Compatible Endpoint

This script reproduces the bug where CrewAI shows generic "LLM Failed" errors
instead of propagating specific error details from custom endpoints.
"""

import sys

from crewai import Agent, Task, Crew
from crewai.llm import LLM


def test_custom_endpoint_error_handling():
    """Test error handling with a custom OpenAI-compatible endpoint."""

    print("Testing custom endpoint error handling...")

    custom_llm = LLM(
        model="gpt-3.5-turbo",
        base_url="https://non-existent-endpoint.example.com/v1",
        api_key="fake-api-key-for-testing"
    )

    agent = Agent(
        role="Test Agent",
        goal="Test custom endpoint error handling",
        backstory="A test agent for reproducing issue #3165",
        llm=custom_llm,
        verbose=True
    )

    task = Task(
        description="Say hello world",
        expected_output="A simple greeting",
        agent=agent
    )

    crew = Crew(
        agents=[agent],
        tasks=[task],
        verbose=True
    )

    try:
        print("\nAttempting to run crew with custom endpoint...")
        result = crew.kickoff()
        print(f"Unexpected success: {result}")
        return False  # the call should have failed against a non-existent endpoint
    except Exception as e:
        print(f"\nCaught exception: {type(e).__name__}")
        print(f"Exception message: {str(e)}")

        if "LLM Failed" in str(e) and "connection" not in str(e).lower():
            print("\n❌ BUG CONFIRMED: Generic 'LLM Failed' error without specific details")
            print("Expected: Specific connection/authentication error details")
            return False
        else:
            print("\n✅ Good: Specific error details preserved")
            return True


def test_direct_llm_call():
    """Test direct LLM call with custom endpoint."""

    print("\n" + "=" * 60)
    print("Testing direct LLM call with custom endpoint...")

    custom_llm = LLM(
        model="gpt-3.5-turbo",
        base_url="https://non-existent-endpoint.example.com/v1",
        api_key="fake-api-key-for-testing"
    )

    try:
        print("Attempting direct LLM call...")
        response = custom_llm.call("Hello world")
        print(f"Unexpected success: {response}")
        return False  # the call should have failed against a non-existent endpoint
    except Exception as e:
        print(f"\nCaught exception: {type(e).__name__}")
        print(f"Exception message: {str(e)}")

        error_msg = str(e).lower()
        if any(keyword in error_msg for keyword in ["connection", "resolve", "network", "timeout", "unreachable"]):
            print("\n✅ Good: Specific connection error details preserved")
            return True
        else:
            print("\n❌ BUG CONFIRMED: Generic error without connection details")
            print("Expected: Specific connection error details")
            return False


if __name__ == "__main__":
    print("Reproducing issue #3165: LLM Failed with Custom OpenAI-Compatible Endpoint")
    print("=" * 80)

    crew_test_passed = test_custom_endpoint_error_handling()
    direct_test_passed = test_direct_llm_call()

    print("\n" + "=" * 80)
    print("SUMMARY:")
    print(f"Crew-level test: {'PASSED' if crew_test_passed else 'FAILED (bug confirmed)'}")
    print(f"Direct LLM test: {'PASSED' if direct_test_passed else 'FAILED (bug confirmed)'}")

    if not crew_test_passed or not direct_test_passed:
        print("\n❌ Issue #3165 reproduced successfully")
        print("CrewAI is showing generic errors instead of specific endpoint error details")
        sys.exit(1)
    else:
        print("\n✅ Issue #3165 appears to be fixed")
        sys.exit(0)
@@ -984,27 +984,10 @@ class LLM(BaseLLM):
             # whether to summarize the content or abort based on the respect_context_window flag
             raise
         except Exception as e:
-            error_info = {
-                "error_type": type(e).__name__,
-                "original_error": str(e),
-                "endpoint_info": {
-                    "base_url": self.base_url,
-                    "model": self.model,
-                    "api_base": self.api_base,
-                } if self.base_url or self.api_base else None
-            }
-
             assert hasattr(crewai_event_bus, "emit")
             crewai_event_bus.emit(
                 self,
-                event=LLMCallFailedEvent(
-                    error=str(e),
-                    error_type=error_info["error_type"],
-                    original_error=error_info["original_error"],
-                    endpoint_info=error_info["endpoint_info"],
-                    from_task=from_task,
-                    from_agent=from_agent
-                ),
+                event=LLMCallFailedEvent(error=str(e), from_task=from_task, from_agent=from_agent),
             )
             logging.error(f"LiteLLM call failed: {str(e)}")
             raise
@@ -361,7 +361,6 @@ class EventListener(BaseEventListener):
                 self.formatter.current_tool_branch,
                 event.error,
                 self.formatter.current_crew_tree,
-                event,
             )

     @crewai_event_bus.on(LLMStreamChunkEvent)
@@ -67,9 +67,6 @@ class LLMCallFailedEvent(LLMEventBase):

     error: str
     type: str = "llm_call_failed"
-    error_type: Optional[str] = None
-    original_error: Optional[str] = None
-    endpoint_info: Optional[Dict[str, Any]] = None


 class FunctionCall(BaseModel):
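After this change, `LLMCallFailedEvent` carries only the plain `error` string (plus the base-event fields); `error_type`, `original_error`, and `endpoint_info` are no longer populated. A minimal listener sketch, assuming the import paths shown elsewhere in this diff (they may differ between crewAI versions):

```python
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.llm_events import LLMCallFailedEvent


@crewai_event_bus.on(LLMCallFailedEvent)
def log_llm_failure(source, event):
    # Only the plain error string is available after the revert; endpoint
    # details must be recovered from the caller's own context if needed.
    print(f"LLM call failed: {event.error}")
```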
@@ -721,7 +721,7 @@ class ConsoleFormatter:
         self.print()

     def handle_llm_call_failed(
-        self, tool_branch: Optional[Tree], error: str, crew_tree: Optional[Tree], event: Optional[Any] = None
+        self, tool_branch: Optional[Tree], error: str, crew_tree: Optional[Tree]
     ) -> None:
         """Handle LLM call failed event."""
         if not self.verbose:
@@ -764,19 +764,9 @@ class ConsoleFormatter:
         self.print(tree_to_use)
         self.print()

-        # Show detailed error panel
+        # Show error panel
         error_content = Text()
         error_content.append("❌ LLM Call Failed\n", style="red bold")
-
-        if event and hasattr(event, 'error_type') and event.error_type:
-            error_content.append(f"Error Type: {event.error_type}\n", style="yellow")
-
-        if event and hasattr(event, 'endpoint_info') and event.endpoint_info:
-            endpoint = event.endpoint_info.get('base_url') or event.endpoint_info.get('api_base')
-            if endpoint:
-                error_content.append(f"Endpoint: {endpoint}\n", style="cyan")
-            error_content.append(f"Model: {event.endpoint_info.get('model', 'unknown')}\n", style="cyan")
-
         error_content.append("Error: ", style="white")
         error_content.append(str(error), style="red")
@@ -1,185 +0,0 @@
"""
Tests for custom endpoint error handling (issue #3165).

These tests verify that CrewAI properly propagates specific error details
from custom OpenAI-compatible endpoints instead of showing generic "LLM Failed" errors.
"""

import pytest
from unittest.mock import patch, MagicMock
from crewai.llm import LLM
from crewai.utilities.events.llm_events import LLMCallFailedEvent
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
import requests


class TestCustomEndpointErrorHandling:
    """Test error handling for custom OpenAI-compatible endpoints."""

    def test_connection_error_preserves_details(self):
        """Test that connection errors preserve specific error details."""
        custom_llm = LLM(
            model="gpt-3.5-turbo",
            base_url="https://non-existent-endpoint.example.com/v1",
            api_key="fake-api-key"
        )

        with patch('litellm.completion') as mock_completion:
            mock_completion.side_effect = requests.exceptions.ConnectionError(
                "Failed to establish a new connection: [Errno -2] Name or service not known"
            )

            with pytest.raises(requests.exceptions.ConnectionError) as exc_info:
                custom_llm.call("Hello world")

            assert "Name or service not known" in str(exc_info.value)

    def test_authentication_error_preserves_details(self):
        """Test that authentication errors preserve specific error details."""
        custom_llm = LLM(
            model="gpt-3.5-turbo",
            base_url="https://api.openai.com/v1",
            api_key="invalid-api-key"
        )

        with patch('litellm.completion') as mock_completion:
            mock_completion.side_effect = Exception(
                "AuthenticationError: Incorrect API key provided"
            )

            with pytest.raises(Exception) as exc_info:
                custom_llm.call("Hello world")

            assert "AuthenticationError" in str(exc_info.value)
            assert "Incorrect API key" in str(exc_info.value)

    def test_llm_call_failed_event_enhanced_fields(self):
        """Test that LLMCallFailedEvent includes enhanced error information."""
        custom_llm = LLM(
            model="gpt-3.5-turbo",
            base_url="https://custom-endpoint.example.com/v1",
            api_key="test-key"
        )

        captured_events = []

        def capture_event(sender, event):
            captured_events.append(event)

        with patch('crewai.utilities.events.crewai_event_bus.crewai_event_bus.emit', side_effect=capture_event):
            with patch('litellm.completion') as mock_completion:
                mock_completion.side_effect = requests.exceptions.ConnectionError(
                    "Connection failed"
                )

                with pytest.raises(requests.exceptions.ConnectionError):
                    custom_llm.call("Hello world")

        assert len(captured_events) == 2  # Started and Failed events
        failed_event = captured_events[1]
        assert isinstance(failed_event, LLMCallFailedEvent)
        assert failed_event.error_type == "ConnectionError"
        assert failed_event.original_error == "Connection failed"
        assert failed_event.endpoint_info is not None
        assert failed_event.endpoint_info["base_url"] == "https://custom-endpoint.example.com/v1"
        assert failed_event.endpoint_info["model"] == "gpt-3.5-turbo"

    def test_console_formatter_displays_enhanced_error_info(self):
        """Test that console formatter displays enhanced error information."""
        formatter = ConsoleFormatter(verbose=True)

        mock_event = MagicMock()
        mock_event.error_type = "ConnectionError"
        mock_event.endpoint_info = {
            "base_url": "https://custom-endpoint.example.com/v1",
            "model": "gpt-3.5-turbo"
        }

        captured_output = []

        def mock_print_panel(content, title, style):
            captured_output.append(str(content))

        formatter.print_panel = mock_print_panel

        formatter.handle_llm_call_failed(
            tool_branch=None,
            error="Connection failed",
            crew_tree=None,
            event=mock_event
        )

        output = captured_output[0]
        assert "Error Type: ConnectionError" in output
        assert "Endpoint: https://custom-endpoint.example.com/v1" in output
        assert "Model: gpt-3.5-turbo" in output
        assert "Connection failed" in output

    def test_backward_compatibility_without_enhanced_fields(self):
        """Test that console formatter works without enhanced fields for backward compatibility."""
        formatter = ConsoleFormatter(verbose=True)

        captured_output = []

        def mock_print_panel(content, title, style):
            captured_output.append(str(content))

        formatter.print_panel = mock_print_panel

        formatter.handle_llm_call_failed(
            tool_branch=None,
            error="Generic error message",
            crew_tree=None,
            event=None
        )

        output = captured_output[0]
        assert "❌ LLM Call Failed" in output
        assert "Generic error message" in output
        assert "Error Type:" not in output
        assert "Endpoint:" not in output

    def test_streaming_response_error_handling(self):
        """Test that streaming responses also preserve error details."""
        custom_llm = LLM(
            model="gpt-3.5-turbo",
            base_url="https://custom-endpoint.example.com/v1",
            api_key="test-key",
            stream=True
        )

        with patch('litellm.completion') as mock_completion:
            mock_completion.side_effect = requests.exceptions.ConnectionError(
                "Streaming connection failed"
            )

            with pytest.raises(Exception) as exc_info:
                custom_llm.call("Hello world")

            assert "Streaming connection failed" in str(exc_info.value)

    def test_non_custom_endpoint_error_handling(self):
        """Test that standard OpenAI endpoint errors are handled normally."""
        standard_llm = LLM(
            model="gpt-3.5-turbo",
            api_key="test-key"
        )

        captured_events = []

        def capture_event(sender, event):
            captured_events.append(event)

        with patch('crewai.utilities.events.crewai_event_bus.crewai_event_bus.emit', side_effect=capture_event):
            with patch('litellm.completion') as mock_completion:
                mock_completion.side_effect = Exception("Standard API error")

                with pytest.raises(Exception):
                    standard_llm.call("Hello world")

        assert len(captured_events) == 2  # Started and Failed events
        failed_event = captured_events[1]
        assert isinstance(failed_event, LLMCallFailedEvent)
        assert failed_event.error_type == "Exception"
        assert failed_event.original_error == "Standard API error"
        assert failed_event.endpoint_info is None  # No custom endpoint info