fix: bump litellm to >=1.83.0 to address CVE-2026-35030

* fix: bump litellm to >=1.83.0 to address CVE-2026-35030

Bump litellm from <=1.82.6 to >=1.83.0 to fix JWT auth bypass via
OIDC cache key collision (CVE-2026-35030). Also widen devtools openai
pin from ~=1.83.0 to >=1.83.0,<3 to resolve the version conflict
(litellm 1.83.0 requires openai>=2.8.0).
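
As an illustration (not part of the change), the conflict can be
reproduced with the packaging library; devtools_pin and litellm_need
are hypothetical names:

    from packaging.specifiers import SpecifierSet

    devtools_pin = SpecifierSet("~=1.83.0")  # i.e. >=1.83.0,<1.84.0
    litellm_need = SpecifierSet(">=2.8.0")   # what litellm 1.83.0 requires
    # No openai release satisfies both specifier sets at once:
    print("2.8.0" in devtools_pin)   # False (above the <1.84.0 cap)
    print("1.83.9" in litellm_need)  # False (below the >=2.8.0 floor)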

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* fix: resolve mypy errors from litellm bump

- Remove unused type: ignore[import-untyped] on instructor import
- Remove all unused type: ignore[union-attr] comments (litellm types fixed)
- Add hasattr guard for tool_call.function, sketched below: new litellm
  adds ChatCompletionMessageCustomToolCall to the union, which lacks .function
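
A minimal sketch of the guard pattern; get_function_name is a
hypothetical helper, not code from this diff:

    def get_function_name(tool_call) -> str:
        # Custom tool calls in the widened union carry no .function.
        if not hasattr(tool_call, "function") or tool_call.function is None:
            raise ValueError(
                f"Unsupported tool call type: {type(tool_call).__name__}"
            )
        return tool_call.function.name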

* fix: tighten litellm pin to ~=1.83.0 (patch-only bumps)

>=1.83.0,<2 is too wide: litellm has had breaking changes between
minor releases. ~=1.83.0 is equivalent to >=1.83.0,<1.84.0, so we
still get CVE patches without pulling in breaking minor releases.
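
The compatible-release semantics, illustrated with the packaging
library (illustrative snippet, not part of the change):

    from packaging.specifiers import SpecifierSet

    compat = SpecifierSet("~=1.83.0")  # compatible release: >=1.83.0,<1.84.0
    print("1.83.7" in compat)  # True: patch bumps (CVE fixes) still arrive
    print("1.84.0" in compat)  # False: breaking minor releases are excluded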

* ci: bump uv from 0.8.4 to 0.11.3

* fix: resolve mypy errors in openai completion from 2.x type changes

Use isinstance checks with concrete openai response types instead of
string comparisons for proper type narrowing. Update code interpreter
handling for outputs/OutputImage API changes in openai 2.x.
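
A minimal sketch of the narrowing pattern, assuming openai 2.x is
installed; handle is a hypothetical function, ResponseFunctionToolCall
is a real type:

    from openai.types.responses import ResponseFunctionToolCall

    def handle(item) -> None:
        # isinstance() narrows the output-item union for mypy; comparing
        # item.type == "function_call" does not, which is why attribute
        # access previously needed type: ignore[union-attr] comments.
        if isinstance(item, ResponseFunctionToolCall):
            print(item.call_id, item.name, item.arguments)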

* fix: pre-cache tiktoken encoding before VCR intercepts requests
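
tiktoken fetches the BPE file over HTTP on first use, so grabbing the
encoding at import time, before VCR patches the transport, keeps the
download out of the recorded cassettes. The one-liner (mirrors the
diff below):

    import tiktoken

    # Force the encoding download/cache while real HTTP is still available.
    tiktoken.get_encoding("cl100k_base")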

---------

Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Co-authored-by: Alex <alex@crewai.com>
Co-authored-by: Greyson LaLonde <greyson@crewai.com>
Author: Lorenze Jay
Date:   2026-04-06 09:41:20 -07:00
Committer: GitHub
Parent: 71b4667a0e
Commit: fdb9b6f090

16 changed files with 215 additions and 144 deletions


@@ -83,7 +83,7 @@ voyageai = [
"voyageai~=0.3.5",
]
litellm = [
"litellm>=1.74.9,<=1.82.6",
"litellm~=1.83.0",
]
bedrock = [
"boto3~=1.40.45",


@@ -13,7 +13,15 @@ from openai.lib.streaming.chat import ChatCompletionStream
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from openai.types.chat.chat_completion import Choice
 from openai.types.chat.chat_completion_chunk import ChoiceDelta
-from openai.types.responses import Response
+from openai.types.responses import (
+    Response,
+    ResponseCodeInterpreterToolCall,
+    ResponseComputerToolCall,
+    ResponseFileSearchToolCall,
+    ResponseFunctionToolCall,
+    ResponseFunctionWebSearch,
+    ResponseReasoningItem,
+)
 from pydantic import BaseModel, PrivateAttr, model_validator
 
 from crewai.events.types.llm_events import LLMCallType
from crewai.events.types.llm_events import LLMCallType
@@ -1344,105 +1352,102 @@ class OpenAICompletion(BaseLLM):
         )
         for item in response.output:
-            item_type = item.type
-            if item_type == "web_search_call":
+            if isinstance(item, ResponseFunctionWebSearch):
                 result.web_search_results.append(
                     WebSearchResult(
                         id=item.id,
-                        status=item.status,  # type: ignore[union-attr]
-                        type=item_type,
+                        status=item.status,
+                        type=item.type,
                     )
                 )
-            elif item_type == "file_search_call":
+            elif isinstance(item, ResponseFileSearchToolCall):
                 file_results: list[FileSearchResultItem] = (
                     [
                         FileSearchResultItem(
-                            file_id=r.file_id,  # type: ignore[union-attr]
-                            filename=r.filename,  # type: ignore[union-attr]
-                            text=r.text,  # type: ignore[union-attr]
-                            score=r.score,  # type: ignore[union-attr]
-                            attributes=r.attributes,  # type: ignore[union-attr]
+                            file_id=r.file_id,
+                            filename=r.filename,
+                            text=r.text,
+                            score=r.score,
+                            attributes=r.attributes,
                         )
-                        for r in item.results  # type: ignore[union-attr]
+                        for r in item.results
                     ]
-                    if item.results  # type: ignore[union-attr]
+                    if item.results
                     else []
                 )
                 result.file_search_results.append(
                     FileSearchResult(
                         id=item.id,
-                        status=item.status,  # type: ignore[union-attr]
-                        type=item_type,
-                        queries=list(item.queries),  # type: ignore[union-attr]
+                        status=item.status,
+                        type=item.type,
+                        queries=list(item.queries),
                         results=file_results,
                     )
                 )
-            elif item_type == "code_interpreter_call":
+            elif isinstance(item, ResponseCodeInterpreterToolCall):
                 code_results: list[
                     CodeInterpreterLogResult | CodeInterpreterFileResult
                 ] = []
-                for r in item.results:  # type: ignore[union-attr]
-                    if r.type == "logs":  # type: ignore[union-attr]
+                for r in item.outputs or []:
+                    if r.type == "logs":
                         code_results.append(
-                            CodeInterpreterLogResult(type="logs", logs=r.logs)  # type: ignore[union-attr]
+                            CodeInterpreterLogResult(type="logs", logs=r.logs)
                         )
-                    elif r.type == "files":  # type: ignore[union-attr]
-                        files_data = [
-                            {"file_id": f.file_id, "mime_type": f.mime_type}
-                            for f in r.files  # type: ignore[union-attr]
-                        ]
+                    elif r.type == "image":
                         code_results.append(
-                            CodeInterpreterFileResult(type="files", files=files_data)
+                            CodeInterpreterFileResult(
+                                type="files",
+                                files=[{"url": r.url}],
+                            )
                         )
                 result.code_interpreter_results.append(
                     CodeInterpreterResult(
                         id=item.id,
-                        status=item.status,  # type: ignore[union-attr]
-                        type=item_type,
-                        code=item.code,  # type: ignore[union-attr]
-                        container_id=item.container_id,  # type: ignore[union-attr]
+                        status=item.status,
+                        type=item.type,
+                        code=item.code,
+                        container_id=item.container_id,
                         results=code_results,
                     )
                 )
-            elif item_type == "computer_call":
-                action_dict = item.action.model_dump() if item.action else {}  # type: ignore[union-attr]
+            elif isinstance(item, ResponseComputerToolCall):
+                action_dict = item.action.model_dump() if item.action else {}
                 safety_checks = [
                     {"id": c.id, "code": c.code, "message": c.message}
-                    for c in item.pending_safety_checks  # type: ignore[union-attr]
+                    for c in item.pending_safety_checks
                 ]
                 result.computer_use_results.append(
                     ComputerUseResult(
                         id=item.id,
-                        status=item.status,  # type: ignore[union-attr]
-                        type=item_type,
-                        call_id=item.call_id,  # type: ignore[union-attr]
+                        status=item.status,
+                        type=item.type,
+                        call_id=item.call_id,
                         action=action_dict,
                         pending_safety_checks=safety_checks,
                     )
                 )
-            elif item_type == "reasoning":
-                summaries = [{"type": s.type, "text": s.text} for s in item.summary]  # type: ignore[union-attr]
+            elif isinstance(item, ResponseReasoningItem):
+                summaries = [{"type": s.type, "text": s.text} for s in item.summary]
                 result.reasoning_summaries.append(
                     ReasoningSummary(
                         id=item.id,
-                        status=item.status,  # type: ignore[union-attr]
-                        type=item_type,
+                        status=item.status,
+                        type=item.type,
                         summary=summaries,
-                        encrypted_content=item.encrypted_content,  # type: ignore[union-attr]
+                        encrypted_content=item.encrypted_content,
                     )
                 )
-            elif item_type == "function_call":
+            elif isinstance(item, ResponseFunctionToolCall):
                 result.function_calls.append(
                     {
-                        "id": item.call_id,  # type: ignore[union-attr]
-                        "name": item.name,  # type: ignore[union-attr]
-                        "arguments": item.arguments,  # type: ignore[union-attr]
+                        "id": item.call_id,
+                        "name": item.name,
+                        "arguments": item.arguments,
                     }
                 )
@@ -1625,6 +1630,10 @@ class OpenAICompletion(BaseLLM):
         # If there are tool_calls and available_functions, execute the tools
         if message.tool_calls and available_functions:
             tool_call = message.tool_calls[0]
+            if not hasattr(tool_call, "function") or tool_call.function is None:
+                raise ValueError(
+                    f"Unsupported tool call type: {type(tool_call).__name__}"
+                )
             function_name = tool_call.function.name
 
             try:
@@ -2010,6 +2019,10 @@ class OpenAICompletion(BaseLLM):
         # If there are tool_calls and available_functions, execute the tools
         if message.tool_calls and available_functions:
             tool_call = message.tool_calls[0]
+            if not hasattr(tool_call, "function") or tool_call.function is None:
+                raise ValueError(
+                    f"Unsupported tool call type: {type(tool_call).__name__}"
+                )
             function_name = tool_call.function.name
 
             try:


@@ -60,7 +60,7 @@ class InternalInstructor(Generic[T]):
         self.llm = llm or (agent.function_calling_llm or agent.llm if agent else None)
         with suppress_warnings():
-            import instructor  # type: ignore[import-untyped]
+            import instructor
 
         if (
             self.llm is not None


@@ -7,6 +7,9 @@ import tiktoken
 from pydantic import BaseModel
 
 from crewai.llm import LLM
+
+# Pre-cache tiktoken encoding so VCR doesn't intercept the download request
+tiktoken.get_encoding("cl100k_base")
 from crewai.llms.providers.anthropic.completion import AnthropicCompletion


@@ -12,7 +12,7 @@ private = true
 dependencies = [
     "click~=8.1.7",
     "toml~=0.10.2",
-    "openai~=1.83.0",
+    "openai>=1.83.0,<3",
     "python-dotenv~=1.1.1",
     "pygithub~=1.59.1",
     "rich>=13.9.4",