fix: resolve CI failures for fallback LLM implementation

- Fix type checker error by adding None check before raising last_exception
- Fix ContextWindowExceededError construction to use the correct signature (message, model, llm_provider)
- Update auth error test assertion to match new print message format

Co-Authored-By: João <joao@crewai.com>
Author: Devin AI
Date: 2025-06-19 06:13:44 +00:00
parent 06e2683fd8
commit bfe3931dcd
3 changed files with 8 additions and 3 deletions


@@ -187,7 +187,10 @@ def get_llm_response(
             continue
     printer.print(content="All LLMs failed, raising last exception", color="red")
-    raise last_exception
+    if last_exception is not None:
+        raise last_exception
+    else:
+        raise RuntimeError("All LLMs failed but no exception was captured")


 def process_llm_response(
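
For context, the hunk above sits inside a primary/fallback retry loop. The sketch below illustrates the pattern under assumed names: the llms list, the llm.call(messages) interface, and the function parameters are not part of this commit and are only illustrative; the printer.print calls and the None guard mirror the diff.

from typing import List, Optional

def get_llm_response(llms: List, messages: List[dict], printer) -> str:
    """Sketch: try each candidate LLM in order, remembering the last failure."""
    last_exception: Optional[Exception] = None
    for llm in llms:
        try:
            return llm.call(messages)
        except Exception as e:  # assumed: any call failure triggers fallback
            last_exception = e
            printer.print(content=f"LLM call failed: {e}", color="red")
            continue
    printer.print(content="All LLMs failed, raising last exception", color="red")
    # The None guard is what satisfies the type checker: last_exception is
    # Optional[Exception], so a bare `raise last_exception` is rejected.
    if last_exception is not None:
        raise last_exception
    else:
        raise RuntimeError("All LLMs failed but no exception was captured")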


@@ -1984,7 +1984,7 @@ def test_crew_agent_executor_litellm_auth_error():
     )
     # Verify error handling messages
-    error_message = f"Error during LLM call: {str(mock_llm_call.side_effect)}"
+    error_message = f"Primary LLM failed: {str(mock_llm_call.side_effect)}"
     mock_printer.assert_any_call(
         content=error_message,
         color="red",


@@ -133,7 +133,9 @@ def test_agent_fallback_context_window_error():
     with patch.object(primary_llm, 'call') as mock_primary, \
          patch.object(fallback_llm, 'call') as mock_fallback:
-        mock_primary.side_effect = ContextWindowExceededError("Context window exceeded")
+        mock_primary.side_effect = ContextWindowExceededError(
+            message="Context window exceeded", model="gpt-4", llm_provider="openai"
+        )
         mock_fallback.return_value = "Fallback response"
         result = agent.execute_task(task)
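
As a standalone reference, the exception used in the test above can be constructed directly with the keyword signature the commit message names. This assumes litellm is installed; the model and provider values are the illustrative ones from the test, not requirements of the API.

from litellm.exceptions import ContextWindowExceededError

# Keyword arguments follow the signature noted in the commit message:
# (message, model, llm_provider). gpt-4/openai are illustrative values.
err = ContextWindowExceededError(
    message="Context window exceeded",
    model="gpt-4",
    llm_provider="openai",
)

try:
    raise err
except ContextWindowExceededError as exc:
    print(f"caught: {exc}")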