mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-23 07:08:14 +00:00
Compare commits
7 Commits
devin/1768
...
devin/1749
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0938968a20 | ||
|
|
8538646f47 | ||
|
|
7e490f73df | ||
|
|
d1a0a4e258 | ||
|
|
bab03b2be1 | ||
|
|
5fcfba82dc | ||
|
|
519af74cf7 |
@@ -1,16 +1,20 @@
|
|||||||
import warnings
|
import warnings
|
||||||
|
|
||||||
from crewai.agent import Agent
|
from crewai.agent import Agent
|
||||||
|
from crewai import agent
|
||||||
|
from crewai import cli
|
||||||
from crewai.crew import Crew
|
from crewai.crew import Crew
|
||||||
from crewai.crews.crew_output import CrewOutput
|
from crewai.crews.crew_output import CrewOutput
|
||||||
from crewai.flow.flow import Flow
|
from crewai.flow.flow import Flow
|
||||||
from crewai.knowledge.knowledge import Knowledge
|
from crewai.knowledge.knowledge import Knowledge
|
||||||
|
from crewai import knowledge
|
||||||
from crewai.llm import LLM
|
from crewai.llm import LLM
|
||||||
from crewai.llms.base_llm import BaseLLM
|
from crewai.llms.base_llm import BaseLLM
|
||||||
from crewai.process import Process
|
from crewai.process import Process
|
||||||
from crewai.task import Task
|
from crewai.task import Task
|
||||||
from crewai.tasks.llm_guardrail import LLMGuardrail
|
from crewai.tasks.llm_guardrail import LLMGuardrail
|
||||||
from crewai.tasks.task_output import TaskOutput
|
from crewai.tasks.task_output import TaskOutput
|
||||||
|
from crewai import utilities
|
||||||
|
|
||||||
warnings.filterwarnings(
|
warnings.filterwarnings(
|
||||||
"ignore",
|
"ignore",
|
||||||
@@ -21,6 +25,8 @@ warnings.filterwarnings(
|
|||||||
__version__ = "0.126.0"
|
__version__ = "0.126.0"
|
||||||
__all__ = [
|
__all__ = [
|
||||||
"Agent",
|
"Agent",
|
||||||
|
"agent",
|
||||||
|
"cli",
|
||||||
"Crew",
|
"Crew",
|
||||||
"CrewOutput",
|
"CrewOutput",
|
||||||
"Process",
|
"Process",
|
||||||
@@ -29,6 +35,8 @@ __all__ = [
|
|||||||
"BaseLLM",
|
"BaseLLM",
|
||||||
"Flow",
|
"Flow",
|
||||||
"Knowledge",
|
"Knowledge",
|
||||||
|
"knowledge",
|
||||||
"TaskOutput",
|
"TaskOutput",
|
||||||
"LLMGuardrail",
|
"LLMGuardrail",
|
||||||
|
"utilities",
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -1,8 +1,6 @@
|
|||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
import sys
|
|
||||||
import threading
|
|
||||||
import warnings
|
import warnings
|
||||||
from collections import defaultdict
|
from collections import defaultdict
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
@@ -48,8 +46,7 @@ with warnings.catch_warnings():
|
|||||||
from litellm.utils import supports_response_schema
|
from litellm.utils import supports_response_schema
|
||||||
|
|
||||||
|
|
||||||
import io
|
|
||||||
from typing import TextIO
|
|
||||||
|
|
||||||
from crewai.llms.base_llm import BaseLLM
|
from crewai.llms.base_llm import BaseLLM
|
||||||
from crewai.utilities.events import crewai_event_bus
|
from crewai.utilities.events import crewai_event_bus
|
||||||
@@ -60,69 +57,7 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
|||||||
load_dotenv()
|
load_dotenv()
|
||||||
|
|
||||||
|
|
||||||
class FilteredStream(io.TextIOBase):
|
|
||||||
_lock = None
|
|
||||||
|
|
||||||
def __init__(self, original_stream: TextIO):
|
|
||||||
self._original_stream = original_stream
|
|
||||||
self._lock = threading.Lock()
|
|
||||||
|
|
||||||
def write(self, s: str) -> int:
|
|
||||||
if not self._lock:
|
|
||||||
self._lock = threading.Lock()
|
|
||||||
|
|
||||||
with self._lock:
|
|
||||||
lower_s = s.lower()
|
|
||||||
|
|
||||||
# Skip common noisy LiteLLM banners and any other lines that contain "litellm"
|
|
||||||
if (
|
|
||||||
"give feedback / get help" in lower_s
|
|
||||||
or "litellm.info:" in lower_s
|
|
||||||
or "litellm" in lower_s
|
|
||||||
or "Consider using a smaller input or implementing a text splitting strategy" in lower_s
|
|
||||||
):
|
|
||||||
return 0
|
|
||||||
|
|
||||||
return self._original_stream.write(s)
|
|
||||||
|
|
||||||
def flush(self):
|
|
||||||
with self._lock:
|
|
||||||
return self._original_stream.flush()
|
|
||||||
|
|
||||||
def __getattr__(self, name):
|
|
||||||
"""Delegate attribute access to the wrapped original stream.
|
|
||||||
|
|
||||||
This ensures compatibility with libraries (e.g., Rich) that rely on
|
|
||||||
attributes such as `encoding`, `isatty`, `buffer`, etc., which may not
|
|
||||||
be explicitly defined on this proxy class.
|
|
||||||
"""
|
|
||||||
return getattr(self._original_stream, name)
|
|
||||||
|
|
||||||
# Delegate common properties/methods explicitly so they aren't shadowed by
|
|
||||||
# the TextIOBase defaults (e.g., .encoding returns None by default, which
|
|
||||||
# confuses Rich). These explicit pass-throughs ensure the wrapped Console
|
|
||||||
# still sees a fully-featured stream.
|
|
||||||
@property
|
|
||||||
def encoding(self):
|
|
||||||
return getattr(self._original_stream, "encoding", "utf-8")
|
|
||||||
|
|
||||||
def isatty(self):
|
|
||||||
return self._original_stream.isatty()
|
|
||||||
|
|
||||||
def fileno(self):
|
|
||||||
return self._original_stream.fileno()
|
|
||||||
|
|
||||||
def writable(self):
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
# Apply the filtered stream globally so that any subsequent writes containing the filtered
|
|
||||||
# keywords (e.g., "litellm") are hidden from terminal output. We guard against double
|
|
||||||
# wrapping to ensure idempotency in environments where this module might be reloaded.
|
|
||||||
if not isinstance(sys.stdout, FilteredStream):
|
|
||||||
sys.stdout = FilteredStream(sys.stdout)
|
|
||||||
if not isinstance(sys.stderr, FilteredStream):
|
|
||||||
sys.stderr = FilteredStream(sys.stderr)
|
|
||||||
|
|
||||||
|
|
||||||
LLM_CONTEXT_WINDOW_SIZES = {
|
LLM_CONTEXT_WINDOW_SIZES = {
|
||||||
@@ -266,6 +201,40 @@ def suppress_warnings():
|
|||||||
yield
|
yield
|
||||||
|
|
||||||
|
|
||||||
|
@contextmanager
|
||||||
|
def suppress_litellm_output():
|
||||||
|
"""Contextually suppress litellm-related logging output during LLM calls."""
|
||||||
|
litellm_logger = logging.getLogger("litellm")
|
||||||
|
original_level = litellm_logger.level
|
||||||
|
|
||||||
|
warning_patterns = [
|
||||||
|
".*give feedback.*",
|
||||||
|
".*Consider using a smaller input.*",
|
||||||
|
".*litellm\\.info:.*",
|
||||||
|
".*text splitting strategy.*"
|
||||||
|
]
|
||||||
|
|
||||||
|
try:
|
||||||
|
with warnings.catch_warnings():
|
||||||
|
for pattern in warning_patterns:
|
||||||
|
warnings.filterwarnings("ignore", message=pattern)
|
||||||
|
|
||||||
|
try:
|
||||||
|
litellm_logger.setLevel(logging.WARNING)
|
||||||
|
except Exception as e:
|
||||||
|
logging.debug(f"Error setting logger level: {e}")
|
||||||
|
|
||||||
|
yield
|
||||||
|
except Exception as e:
|
||||||
|
logging.debug(f"Error in litellm output suppression: {e}")
|
||||||
|
raise
|
||||||
|
finally:
|
||||||
|
try:
|
||||||
|
litellm_logger.setLevel(original_level)
|
||||||
|
except Exception as e:
|
||||||
|
logging.debug(f"Error restoring logger level: {e}")
|
||||||
|
|
||||||
|
|
||||||
class Delta(TypedDict):
|
class Delta(TypedDict):
|
||||||
content: Optional[str]
|
content: Optional[str]
|
||||||
role: Optional[str]
|
role: Optional[str]
|
||||||
@@ -450,60 +419,61 @@ class LLM(BaseLLM):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
# --- 3) Process each chunk in the stream
|
# --- 3) Process each chunk in the stream
|
||||||
for chunk in litellm.completion(**params):
|
with suppress_litellm_output():
|
||||||
chunk_count += 1
|
for chunk in litellm.completion(**params):
|
||||||
last_chunk = chunk
|
chunk_count += 1
|
||||||
|
last_chunk = chunk
|
||||||
|
|
||||||
# Extract content from the chunk
|
# Extract content from the chunk
|
||||||
chunk_content = None
|
chunk_content = None
|
||||||
|
|
||||||
# Safely extract content from various chunk formats
|
# Safely extract content from various chunk formats
|
||||||
try:
|
try:
|
||||||
# Try to access choices safely
|
# Try to access choices safely
|
||||||
choices = None
|
choices = None
|
||||||
if isinstance(chunk, dict) and "choices" in chunk:
|
if isinstance(chunk, dict) and "choices" in chunk:
|
||||||
choices = chunk["choices"]
|
choices = chunk["choices"]
|
||||||
elif hasattr(chunk, "choices"):
|
elif hasattr(chunk, "choices"):
|
||||||
# Check if choices is not a type but an actual attribute with value
|
# Check if choices is not a type but an actual attribute with value
|
||||||
if not isinstance(getattr(chunk, "choices"), type):
|
if not isinstance(getattr(chunk, "choices"), type):
|
||||||
choices = getattr(chunk, "choices")
|
choices = getattr(chunk, "choices")
|
||||||
|
|
||||||
# Try to extract usage information if available
|
# Try to extract usage information if available
|
||||||
if isinstance(chunk, dict) and "usage" in chunk:
|
if isinstance(chunk, dict) and "usage" in chunk:
|
||||||
usage_info = chunk["usage"]
|
usage_info = chunk["usage"]
|
||||||
elif hasattr(chunk, "usage"):
|
elif hasattr(chunk, "usage"):
|
||||||
# Check if usage is not a type but an actual attribute with value
|
# Check if usage is not a type but an actual attribute with value
|
||||||
if not isinstance(getattr(chunk, "usage"), type):
|
if not isinstance(getattr(chunk, "usage"), type):
|
||||||
usage_info = getattr(chunk, "usage")
|
usage_info = getattr(chunk, "usage")
|
||||||
|
|
||||||
if choices and len(choices) > 0:
|
if choices and len(choices) > 0:
|
||||||
choice = choices[0]
|
choice = choices[0]
|
||||||
|
|
||||||
# Handle different delta formats
|
# Handle different delta formats
|
||||||
delta = None
|
delta = None
|
||||||
if isinstance(choice, dict) and "delta" in choice:
|
if isinstance(choice, dict) and "delta" in choice:
|
||||||
delta = choice["delta"]
|
delta = choice["delta"]
|
||||||
elif hasattr(choice, "delta"):
|
elif hasattr(choice, "delta"):
|
||||||
delta = getattr(choice, "delta")
|
delta = getattr(choice, "delta")
|
||||||
|
|
||||||
# Extract content from delta
|
# Extract content from delta
|
||||||
if delta:
|
if delta:
|
||||||
# Handle dict format
|
# Handle dict format
|
||||||
if isinstance(delta, dict):
|
if isinstance(delta, dict):
|
||||||
if "content" in delta and delta["content"] is not None:
|
if "content" in delta and delta["content"] is not None:
|
||||||
chunk_content = delta["content"]
|
chunk_content = delta["content"]
|
||||||
# Handle object format
|
# Handle object format
|
||||||
elif hasattr(delta, "content"):
|
elif hasattr(delta, "content"):
|
||||||
chunk_content = getattr(delta, "content")
|
chunk_content = getattr(delta, "content")
|
||||||
|
|
||||||
# Handle case where content might be None or empty
|
# Handle case where content might be None or empty
|
||||||
if chunk_content is None and isinstance(delta, dict):
|
if chunk_content is None and isinstance(delta, dict):
|
||||||
# Some models might send empty content chunks
|
# Some models might send empty content chunks
|
||||||
chunk_content = ""
|
chunk_content = ""
|
||||||
|
|
||||||
# Enable tool calls using streaming
|
# Enable tool calls using streaming
|
||||||
if "tool_calls" in delta:
|
if "tool_calls" in delta:
|
||||||
tool_calls = delta["tool_calls"]
|
tool_calls = delta["tool_calls"]
|
||||||
|
|
||||||
if tool_calls:
|
if tool_calls:
|
||||||
result = self._handle_streaming_tool_calls(
|
result = self._handle_streaming_tool_calls(
|
||||||
@@ -514,21 +484,22 @@ class LLM(BaseLLM):
|
|||||||
if result is not None:
|
if result is not None:
|
||||||
chunk_content = result
|
chunk_content = result
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.debug(f"Error extracting content from chunk: {e}")
|
logging.error(f"Error extracting content from chunk: {e}", exc_info=True)
|
||||||
logging.debug(f"Chunk format: {type(chunk)}, content: {chunk}")
|
logging.debug(f"Chunk format: {type(chunk)}, content: {chunk}")
|
||||||
|
continue
|
||||||
|
|
||||||
# Only add non-None content to the response
|
# Only add non-None content to the response
|
||||||
if chunk_content is not None:
|
if chunk_content is not None:
|
||||||
# Add the chunk content to the full response
|
# Add the chunk content to the full response
|
||||||
full_response += chunk_content
|
full_response += chunk_content
|
||||||
|
|
||||||
# Emit the chunk event
|
# Emit the chunk event
|
||||||
assert hasattr(crewai_event_bus, "emit")
|
assert hasattr(crewai_event_bus, "emit")
|
||||||
crewai_event_bus.emit(
|
crewai_event_bus.emit(
|
||||||
self,
|
self,
|
||||||
event=LLMStreamChunkEvent(chunk=chunk_content),
|
event=LLMStreamChunkEvent(chunk=chunk_content),
|
||||||
)
|
)
|
||||||
# --- 4) Fallback to non-streaming if no content received
|
# --- 4) Fallback to non-streaming if no content received
|
||||||
if not full_response.strip() and chunk_count == 0:
|
if not full_response.strip() and chunk_count == 0:
|
||||||
logging.warning(
|
logging.warning(
|
||||||
@@ -765,7 +736,8 @@ class LLM(BaseLLM):
|
|||||||
# and convert them to our own exception type for consistent handling
|
# and convert them to our own exception type for consistent handling
|
||||||
# across the codebase. This allows CrewAgentExecutor to handle context
|
# across the codebase. This allows CrewAgentExecutor to handle context
|
||||||
# length issues appropriately.
|
# length issues appropriately.
|
||||||
response = litellm.completion(**params)
|
with suppress_litellm_output():
|
||||||
|
response = litellm.completion(**params)
|
||||||
except ContextWindowExceededError as e:
|
except ContextWindowExceededError as e:
|
||||||
# Convert litellm's context window error to our own exception type
|
# Convert litellm's context window error to our own exception type
|
||||||
# for consistent handling in the rest of the codebase
|
# for consistent handling in the rest of the codebase
|
||||||
|
|||||||
171
tests/test_sys_stream_hijacking.py
Normal file
171
tests/test_sys_stream_hijacking.py
Normal file
@@ -0,0 +1,171 @@
|
|||||||
|
"""Test to reproduce and verify fix for issue #3000: sys.stdout/stderr hijacking."""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import io
|
||||||
|
from unittest.mock import patch, MagicMock
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
|
||||||
|
def test_crewai_hijacks_sys_streams():
|
||||||
|
"""Test that importing crewai.llm currently hijacks sys.stdout and sys.stderr (before fix)."""
|
||||||
|
original_stdout = sys.stdout
|
||||||
|
original_stderr = sys.stderr
|
||||||
|
|
||||||
|
import crewai.llm # noqa: F401
|
||||||
|
|
||||||
|
try:
|
||||||
|
assert sys.stdout is not original_stdout, "sys.stdout should be hijacked by FilteredStream"
|
||||||
|
assert sys.stderr is not original_stderr, "sys.stderr should be hijacked by FilteredStream"
|
||||||
|
assert hasattr(sys.stdout, '_original_stream'), "sys.stdout should be wrapped by FilteredStream"
|
||||||
|
assert hasattr(sys.stderr, '_original_stream'), "sys.stderr should be wrapped by FilteredStream"
|
||||||
|
assert False, "The fix didn't work - streams are still being hijacked"
|
||||||
|
except AssertionError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def test_litellm_output_is_filtered():
|
||||||
|
"""Test that litellm-related output is currently filtered (before fix)."""
|
||||||
|
import crewai.llm # noqa: F401
|
||||||
|
|
||||||
|
captured_output = io.StringIO()
|
||||||
|
|
||||||
|
test_strings = [
|
||||||
|
"litellm.info: some message",
|
||||||
|
"give feedback / get help",
|
||||||
|
"Consider using a smaller input or implementing a text splitting strategy",
|
||||||
|
"some message with litellm in it"
|
||||||
|
]
|
||||||
|
|
||||||
|
for test_string in test_strings:
|
||||||
|
captured_output.seek(0)
|
||||||
|
captured_output.truncate(0)
|
||||||
|
|
||||||
|
original_stdout = sys.stdout
|
||||||
|
sys.stdout = captured_output
|
||||||
|
|
||||||
|
try:
|
||||||
|
print(test_string, end='')
|
||||||
|
assert captured_output.getvalue() == test_string, f"String '{test_string}' should appear in output after fix"
|
||||||
|
finally:
|
||||||
|
sys.stdout = original_stdout
|
||||||
|
|
||||||
|
|
||||||
|
def test_normal_output_passes_through():
|
||||||
|
"""Test that normal output passes through correctly after the fix."""
|
||||||
|
import crewai.llm # noqa: F401
|
||||||
|
|
||||||
|
captured_output = io.StringIO()
|
||||||
|
original_stdout = sys.stdout
|
||||||
|
sys.stdout = captured_output
|
||||||
|
|
||||||
|
try:
|
||||||
|
test_string = "This is normal output that should pass through"
|
||||||
|
print(test_string, end='')
|
||||||
|
|
||||||
|
assert captured_output.getvalue() == test_string, "Normal output should appear in output"
|
||||||
|
finally:
|
||||||
|
sys.stdout = original_stdout
|
||||||
|
|
||||||
|
|
||||||
|
def test_crewai_does_not_hijack_sys_streams_after_fix():
|
||||||
|
"""Test that after the fix, importing crewai.llm does NOT hijack sys.stdout and sys.stderr."""
|
||||||
|
original_stdout = sys.stdout
|
||||||
|
original_stderr = sys.stderr
|
||||||
|
|
||||||
|
if 'crewai.llm' in sys.modules:
|
||||||
|
del sys.modules['crewai.llm']
|
||||||
|
if 'crewai' in sys.modules:
|
||||||
|
del sys.modules['crewai']
|
||||||
|
|
||||||
|
import crewai.llm # noqa: F401
|
||||||
|
|
||||||
|
assert sys.stdout is original_stdout, "sys.stdout should NOT be hijacked after fix"
|
||||||
|
assert sys.stderr is original_stderr, "sys.stderr should NOT be hijacked after fix"
|
||||||
|
assert not hasattr(sys.stdout, '_original_stream'), "sys.stdout should not be wrapped after fix"
|
||||||
|
assert not hasattr(sys.stderr, '_original_stream'), "sys.stderr should not be wrapped after fix"
|
||||||
|
|
||||||
|
|
||||||
|
def test_litellm_output_still_suppressed_during_llm_calls():
|
||||||
|
"""Test that litellm output is still suppressed during actual LLM calls after the fix."""
|
||||||
|
from crewai.llm import LLM
|
||||||
|
|
||||||
|
captured_stdout = io.StringIO()
|
||||||
|
captured_stderr = io.StringIO()
|
||||||
|
|
||||||
|
with patch('sys.stdout', captured_stdout), patch('sys.stderr', captured_stderr):
|
||||||
|
with patch('litellm.completion') as mock_completion:
|
||||||
|
mock_completion.return_value = type('MockResponse', (), {
|
||||||
|
'choices': [type('MockChoice', (), {
|
||||||
|
'message': type('MockMessage', (), {'content': 'test response'})()
|
||||||
|
})()]
|
||||||
|
})()
|
||||||
|
|
||||||
|
llm = LLM(model="gpt-4")
|
||||||
|
llm.call([{"role": "user", "content": "test"}])
|
||||||
|
|
||||||
|
output = captured_stdout.getvalue() + captured_stderr.getvalue()
|
||||||
|
assert "litellm" not in output.lower(), "litellm output should still be suppressed during calls"
|
||||||
|
|
||||||
|
|
||||||
|
def test_concurrent_llm_calls():
|
||||||
|
"""Test that contextual suppression works correctly with concurrent calls."""
|
||||||
|
import threading
|
||||||
|
from crewai.llm import LLM
|
||||||
|
|
||||||
|
results = []
|
||||||
|
|
||||||
|
def make_llm_call():
|
||||||
|
with patch('litellm.completion') as mock_completion:
|
||||||
|
mock_completion.return_value = type('MockResponse', (), {
|
||||||
|
'choices': [type('MockChoice', (), {
|
||||||
|
'message': type('MockMessage', (), {'content': 'test response'})()
|
||||||
|
})()]
|
||||||
|
})()
|
||||||
|
|
||||||
|
llm = LLM(model="gpt-4")
|
||||||
|
result = llm.call([{"role": "user", "content": "test"}])
|
||||||
|
results.append(result)
|
||||||
|
|
||||||
|
threads = [threading.Thread(target=make_llm_call) for _ in range(3)]
|
||||||
|
for thread in threads:
|
||||||
|
thread.start()
|
||||||
|
for thread in threads:
|
||||||
|
thread.join()
|
||||||
|
|
||||||
|
assert len(results) == 3
|
||||||
|
assert all("test response" in result for result in results)
|
||||||
|
|
||||||
|
|
||||||
|
def test_logger_performance():
|
||||||
|
"""Test that logger operations work correctly without global caching."""
|
||||||
|
from crewai.llm import suppress_litellm_output
|
||||||
|
|
||||||
|
with patch('logging.getLogger') as mock_get_logger:
|
||||||
|
mock_logger = MagicMock()
|
||||||
|
mock_get_logger.return_value = mock_logger
|
||||||
|
|
||||||
|
with suppress_litellm_output():
|
||||||
|
pass
|
||||||
|
|
||||||
|
with suppress_litellm_output():
|
||||||
|
pass
|
||||||
|
|
||||||
|
assert mock_get_logger.call_count == 2
|
||||||
|
mock_get_logger.assert_called_with("litellm")
|
||||||
|
|
||||||
|
|
||||||
|
def test_suppression_error_handling():
|
||||||
|
"""Test that suppression continues even if logger operations fail."""
|
||||||
|
from crewai.llm import suppress_litellm_output
|
||||||
|
|
||||||
|
with patch('logging.getLogger') as mock_get_logger:
|
||||||
|
mock_logger = MagicMock()
|
||||||
|
mock_logger.setLevel.side_effect = Exception("Logger error")
|
||||||
|
mock_get_logger.return_value = mock_logger
|
||||||
|
|
||||||
|
try:
|
||||||
|
with suppress_litellm_output():
|
||||||
|
result = "operation completed"
|
||||||
|
assert result == "operation completed"
|
||||||
|
except Exception:
|
||||||
|
pytest.fail("Suppression should not fail even if logger operations fail")
|
||||||
Reference in New Issue
Block a user