chore: improve typing and consolidate utilities

- add type annotations across utility modules  
- refactor printer system, agent utils, and imports for consistency  
- remove unused modules, constants, and redundant patterns  
- improve runtime type checks, exception handling, and guardrail validation  
- standardize warning suppression and logging utilities  
- fix LLM typing, threading/typing edge cases, and test behavior
This commit is contained in:
Greyson LaLonde
2025-09-23 11:33:46 -04:00
committed by GitHub
parent 34bed359a6
commit 3e97393f58
47 changed files with 1939 additions and 1233 deletions

View File

@@ -7,15 +7,14 @@ import pytest
from pydantic import BaseModel
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM
from crewai.events.event_types import (
LLMCallCompletedEvent,
LLMStreamChunkEvent,
ToolUsageStartedEvent,
ToolUsageFinishedEvent,
ToolUsageErrorEvent,
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM
from crewai.utilities.token_counter_callback import TokenCalcHandler
@@ -376,11 +375,11 @@ def get_weather_tool_schema():
def test_context_window_exceeded_error_handling():
"""Test that litellm.ContextWindowExceededError is converted to LLMContextLengthExceededException."""
"""Test that litellm.ContextWindowExceededError is converted to LLMContextLengthExceededError."""
from litellm.exceptions import ContextWindowExceededError
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
LLMContextLengthExceededError,
)
llm = LLM(model="gpt-4")
@@ -393,7 +392,7 @@ def test_context_window_exceeded_error_handling():
llm_provider="openai",
)
with pytest.raises(LLMContextLengthExceededException) as excinfo:
with pytest.raises(LLMContextLengthExceededError) as excinfo:
llm.call("This is a test message")
assert "context length exceeded" in str(excinfo.value).lower()
@@ -408,7 +407,7 @@ def test_context_window_exceeded_error_handling():
llm_provider="openai",
)
with pytest.raises(LLMContextLengthExceededException) as excinfo:
with pytest.raises(LLMContextLengthExceededError) as excinfo:
llm.call("This is a test message")
assert "context length exceeded" in str(excinfo.value).lower()