Compare commits

..

11 Commits

Author SHA1 Message Date
Greyson LaLonde
2652dfecb3 fix: constrain unstructured to ensure python compat 2026-02-11 12:58:55 -05:00
Greyson LaLonde
a71e5db75f feat: update lib bounds; update actions 2026-02-11 11:11:45 -05:00
Greyson LaLonde
3963bb3986 Merge branch 'gl/feat/allow-python314' of https://github.com/crewAIInc/crewAI into gl/feat/allow-python314 2026-02-11 11:05:58 -05:00
Greyson LaLonde
82bf8965b0 chore: update actions to allow 3.14 2026-02-11 11:05:48 -05:00
Greyson LaLonde
c4c35502ba Merge branch 'main' into gl/feat/allow-python314 2026-02-11 10:56:37 -05:00
Greyson LaLonde
13478b87ec feat: extend requires-python upper bound to include 3.14.x 2026-02-11 10:52:58 -05:00
Greyson LaLonde
3a22e80764 fix: ensure openai tool call stream is finalized
Some checks failed
CodeQL Advanced / Analyze (actions) (push) Has been cancelled
CodeQL Advanced / Analyze (python) (push) Has been cancelled
Notify Downstream / notify-downstream (push) Has been cancelled
2026-02-11 10:02:31 -05:00
Greyson LaLonde
9b585a934d fix: pass started_event_id to crew 2026-02-11 09:30:07 -05:00
Rip&Tear
46e1b02154 chore: fix codeql coverage and action version (#4454) 2026-02-11 18:20:07 +08:00
Rip&Tear
87675b49fd test: avoid URL substring assertion in brave search test (#4453)
Some checks failed
CodeQL Advanced / Analyze (actions) (push) Has been cancelled
CodeQL Advanced / Analyze (python) (push) Has been cancelled
Notify Downstream / notify-downstream (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
2026-02-11 14:32:10 +08:00
Lucas Gomide
a3bee66be8 Address OpenSSL CVE-2025-15467 vulnerability (#4426)
Some checks failed
CodeQL Advanced / Analyze (actions) (push) Has been cancelled
CodeQL Advanced / Analyze (python) (push) Has been cancelled
Notify Downstream / notify-downstream (push) Has been cancelled
Build uv cache / build-cache (3.10) (push) Has been cancelled
Build uv cache / build-cache (3.11) (push) Has been cancelled
Build uv cache / build-cache (3.12) (push) Has been cancelled
Build uv cache / build-cache (3.13) (push) Has been cancelled
* fix(security): bump regex from 2024.9.11 to 2026.1.15

Address security vulnerability flagged in regex==2024.9.11

* bump mcp from 1.23.1 to 1.26.0

Address security vulnerability flagged in mcp==1.16.0 (resolved to 1.23.3)
2026-02-10 09:39:35 -08:00
24 changed files with 3169 additions and 2460 deletions

View File

@@ -65,6 +65,8 @@ body:
- '3.10'
- '3.11'
- '3.12'
- '3.13'
- '3.14'
validations:
required: true
- type: input

View File

@@ -14,13 +14,18 @@ paths-ignore:
- "lib/crewai/src/crewai/experimental/a2a/**"
paths:
# Include GitHub Actions workflows/composite actions for CodeQL actions analysis
- ".github/workflows/**"
- ".github/actions/**"
# Include all Python source code from workspace packages
- "lib/crewai/src/**"
- "lib/crewai-tools/src/**"
- "lib/crewai-files/src/**"
- "lib/devtools/src/**"
# Include tests (but exclude cassettes via paths-ignore)
- "lib/crewai/tests/**"
- "lib/crewai-tools/tests/**"
- "lib/crewai-files/tests/**"
- "lib/devtools/tests/**"
# Configure specific queries or packs if needed

View File

@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.10", "3.11", "3.12", "3.13"]
python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
steps:
- name: Checkout repository
@@ -32,6 +32,10 @@ jobs:
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install dependencies and populate cache
run: |
echo "Building global UV cache for Python ${{ matrix.python-version }}..."

View File

@@ -69,7 +69,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
uses: github/codeql-action/init@v4
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
@@ -98,6 +98,6 @@ jobs:
exit 1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
uses: github/codeql-action/analyze@v4
with:
category: "/language:${{matrix.language}}"

View File

@@ -26,15 +26,15 @@ jobs:
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
key: uv-main-py3.12-${{ hashFiles('uv.lock') }}
restore-keys: |
uv-main-py3.11-
uv-main-py3.12-
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
version: "0.8.4"
python-version: "3.11"
python-version: "3.12"
enable-cache: false
- name: Install dependencies
@@ -66,4 +66,4 @@ jobs:
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
key: uv-main-py3.12-${{ hashFiles('uv.lock') }}

View File

@@ -13,8 +13,8 @@ jobs:
strategy:
fail-fast: true
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13']
group: [1, 2, 3, 4, 5, 6, 7, 8]
python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
group: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -40,6 +40,10 @@ jobs:
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install the project
run: uv sync --all-groups --all-extras
@@ -49,7 +53,7 @@ jobs:
path: .test_durations_py*
key: test-durations-py${{ matrix.python-version }}
- name: Run tests (group ${{ matrix.group }} of 8)
- name: Run tests (group ${{ matrix.group }} of 16)
run: |
PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
DURATION_FILE="../../.test_durations_py${PYTHON_VERSION_SAFE}"
@@ -73,17 +77,17 @@ jobs:
cd lib/crewai && uv run pytest \
-vv \
--splits 8 \
--splits 16 \
--group ${{ matrix.group }} \
$DURATIONS_ARG \
--durations=10 \
--maxfail=3
- name: Run tool tests (group ${{ matrix.group }} of 8)
- name: Run tool tests (group ${{ matrix.group }} of 16)
run: |
cd lib/crewai-tools && uv run pytest \
-vv \
--splits 8 \
--splits 16 \
--group ${{ matrix.group }} \
--durations=10 \
--maxfail=3

View File

@@ -12,7 +12,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.10", "3.11", "3.12", "3.13"]
python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
steps:
- name: Checkout code
@@ -39,6 +39,10 @@ jobs:
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install dependencies
run: uv sync --all-groups --all-extras

View File

@@ -16,11 +16,11 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13']
python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
env:
OPENAI_API_KEY: fake-api-key
PYTHONUNBUFFERED: 1
steps:
- name: Checkout repository
uses: actions/checkout@v4
@@ -44,6 +44,10 @@ jobs:
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install the project
run: uv sync --all-groups --all-extras
@@ -68,4 +72,4 @@ jobs:
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [
{ name = "Greyson LaLonde", email = "greyson@crewai.com" }
]
requires-python = ">=3.10, <3.14"
requires-python = ">=3.10,<3.15"
dependencies = [
"Pillow~=10.4.0",
"pypdf~=4.0.0",

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [
{ name = "João Moura", email = "joaomdmoura@gmail.com" },
]
requires-python = ">=3.10, <3.14"
requires-python = ">=3.10,<3.15"
dependencies = [
"lancedb~=0.5.4",
"pytube~=15.0.0",
@@ -118,7 +118,7 @@ rag = [
"lxml>=5.3.0,<5.4.0", # Pin to avoid etree import issues in 5.4.0
]
xml = [
"unstructured[local-inference, all-docs]>=0.17.2"
"unstructured[local-inference, all-docs]>=0.17.2,<0.18.31"
]
oxylabs = [
"oxylabs==2.0.0"

View File

@@ -33,8 +33,11 @@ def test_brave_tool_search(mock_get, brave_tool):
mock_get.return_value.json.return_value = mock_response
result = brave_tool.run(query="test")
assert "Test Title" in result
assert "http://test.com" in result
data = json.loads(result)
assert isinstance(data, list)
assert len(data) >= 1
assert data[0]["title"] == "Test Title"
assert data[0]["url"] == "http://test.com"
@patch("requests.get")

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [
{ name = "Joao Moura", email = "joao@crewai.com" }
]
requires-python = ">=3.10, <3.14"
requires-python = ">=3.10,<3.15"
dependencies = [
# Core Dependencies
"pydantic~=2.11.9",
@@ -14,7 +14,7 @@ dependencies = [
"instructor>=1.3.3",
# Text Processing
"pdfplumber~=0.11.4",
"regex~=2024.9.11",
"regex~=2026.1.15",
# Telemetry and Monitoring
"opentelemetry-api~=1.34.0",
"opentelemetry-sdk~=1.34.0",
@@ -36,7 +36,7 @@ dependencies = [
"json5~=0.10.0",
"portalocker~=2.7.0",
"pydantic-settings~=2.10.1",
"mcp~=1.23.1",
"mcp~=1.26.0",
"uv~=0.9.13",
"aiosqlite~=0.21.0",
]

View File

@@ -3,7 +3,7 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
requires-python = ">=3.10,<3.15"
dependencies = [
"crewai[tools]==1.9.3"
]

View File

@@ -3,7 +3,7 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
requires-python = ">=3.10,<3.15"
dependencies = [
"crewai[tools]==1.9.3"
]

View File

@@ -3,9 +3,9 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
requires-python = ">=3.10,<3.15"
dependencies = [
"crewai[tools]>=0.203.1"
"crewai[tools]==1.9.3"
]
[tool.crewai]

View File

@@ -187,6 +187,7 @@ class Crew(FlowTrackable, BaseModel):
_task_output_handler: TaskOutputStorageHandler = PrivateAttr(
default_factory=TaskOutputStorageHandler
)
_kickoff_event_id: str | None = PrivateAttr(default=None)
name: str | None = Field(default="crew")
cache: bool = Field(default=True)
@@ -759,7 +760,11 @@ class Crew(FlowTrackable, BaseModel):
except Exception as e:
crewai_event_bus.emit(
self,
CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
CrewKickoffFailedEvent(
error=str(e),
crew_name=self.name,
started_event_id=self._kickoff_event_id,
),
)
raise
finally:
@@ -949,7 +954,11 @@ class Crew(FlowTrackable, BaseModel):
except Exception as e:
crewai_event_bus.emit(
self,
CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
CrewKickoffFailedEvent(
error=str(e),
crew_name=self.name,
started_event_id=self._kickoff_event_id,
),
)
raise
finally:
@@ -1524,6 +1533,7 @@ class Crew(FlowTrackable, BaseModel):
crew_name=self.name,
output=final_task_output,
total_tokens=self.token_usage.total_tokens,
started_event_id=self._kickoff_event_id,
),
)

View File

@@ -265,10 +265,9 @@ def prepare_kickoff(
normalized = {}
normalized = before_callback(normalized)
future = crewai_event_bus.emit(
crew,
CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized),
)
started_event = CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized)
crew._kickoff_event_id = started_event.event_id
future = crewai_event_bus.emit(crew, started_event)
if future is not None:
try:
future.result()

View File

@@ -1116,16 +1116,6 @@ class OpenAICompletion(BaseLLM):
return parsed_result
if function_calls and not available_functions:
self._emit_call_completed_event(
response=function_calls,
call_type=LLMCallType.TOOL_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params.get("input", []),
)
return function_calls
if function_calls and available_functions:
for call in function_calls:
function_name = call.get("name", "")
@@ -1254,16 +1244,6 @@ class OpenAICompletion(BaseLLM):
return parsed_result
if function_calls and not available_functions:
self._emit_call_completed_event(
response=function_calls,
call_type=LLMCallType.TOOL_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params.get("input", []),
)
return function_calls
if function_calls and available_functions:
for call in function_calls:
function_name = call.get("name", "")
@@ -1716,6 +1696,99 @@ class OpenAICompletion(BaseLLM):
return content
def _finalize_streaming_response(
self,
full_response: str,
tool_calls: dict[int, dict[str, Any]],
usage_data: dict[str, int],
params: dict[str, Any],
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
) -> str | list[dict[str, Any]]:
"""Finalize a streaming response with usage tracking, tool call handling, and events.
Args:
full_response: The accumulated text response from the stream.
tool_calls: Accumulated tool calls from the stream, keyed by index.
usage_data: Token usage data from the stream.
params: The completion parameters containing messages.
available_functions: Available functions for tool calling.
from_task: Task that initiated the call.
from_agent: Agent that initiated the call.
Returns:
Tool calls list when tools were invoked without available_functions,
tool execution result when available_functions is provided,
or the text response string.
"""
self._track_token_usage_internal(usage_data)
if tool_calls and not available_functions:
tool_calls_list = [
{
"id": call_data["id"],
"type": "function",
"function": {
"name": call_data["name"],
"arguments": call_data["arguments"],
},
"index": call_data["index"],
}
for call_data in tool_calls.values()
]
self._emit_call_completed_event(
response=tool_calls_list,
call_type=LLMCallType.TOOL_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return tool_calls_list
if tool_calls and available_functions:
for call_data in tool_calls.values():
function_name = call_data["name"]
arguments = call_data["arguments"]
if not function_name or not arguments:
continue
if function_name not in available_functions:
logging.warning(
f"Function '{function_name}' not found in available functions"
)
continue
try:
function_args = json.loads(arguments)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse streamed tool arguments: {e}")
continue
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return full_response
def _handle_streaming_completion(
self,
params: dict[str, Any],
@@ -1723,7 +1796,7 @@ class OpenAICompletion(BaseLLM):
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | BaseModel:
) -> str | list[dict[str, Any]] | BaseModel:
"""Handle streaming chat completion."""
full_response = ""
tool_calls: dict[int, dict[str, Any]] = {}
@@ -1840,75 +1913,20 @@ class OpenAICompletion(BaseLLM):
response_id=response_id_stream,
)
self._track_token_usage_internal(usage_data)
if tool_calls and not available_functions:
formatted_tool_calls = [
{
"id": call_data.get("id", f"call_{idx}"),
"type": "function",
"function": {
"name": call_data["name"],
"arguments": call_data["arguments"],
},
}
for idx, call_data in tool_calls.items()
]
self._emit_call_completed_event(
response=formatted_tool_calls,
call_type=LLMCallType.TOOL_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return formatted_tool_calls
if tool_calls and available_functions:
for call_data in tool_calls.values():
function_name = call_data["name"]
arguments = call_data["arguments"]
# Skip if function name is empty or arguments are empty
if not function_name or not arguments:
continue
# Check if function exists in available functions
if function_name not in available_functions:
logging.warning(
f"Function '{function_name}' not found in available functions"
)
continue
try:
function_args = json.loads(arguments)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse streamed tool arguments: {e}")
continue
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
result = self._finalize_streaming_response(
full_response=full_response,
tool_calls=tool_calls,
usage_data=usage_data,
params=params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return self._invoke_after_llm_call_hooks(
params["messages"], full_response, from_agent
)
if isinstance(result, str):
return self._invoke_after_llm_call_hooks(
params["messages"], result, from_agent
)
return result
async def _ahandle_completion(
self,
@@ -2057,7 +2075,7 @@ class OpenAICompletion(BaseLLM):
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | BaseModel:
) -> str | list[dict[str, Any]] | BaseModel:
"""Handle async streaming chat completion."""
full_response = ""
tool_calls: dict[int, dict[str, Any]] = {}
@@ -2183,72 +2201,16 @@ class OpenAICompletion(BaseLLM):
response_id=response_id_stream,
)
self._track_token_usage_internal(usage_data)
if tool_calls and not available_functions:
formatted_tool_calls = [
{
"id": call_data.get("id", f"call_{idx}"),
"type": "function",
"function": {
"name": call_data["name"],
"arguments": call_data["arguments"],
},
}
for idx, call_data in tool_calls.items()
]
self._emit_call_completed_event(
response=formatted_tool_calls,
call_type=LLMCallType.TOOL_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return formatted_tool_calls
if tool_calls and available_functions:
for call_data in tool_calls.values():
function_name = call_data["name"]
arguments = call_data["arguments"]
if not function_name or not arguments:
continue
if function_name not in available_functions:
logging.warning(
f"Function '{function_name}' not found in available functions"
)
continue
try:
function_args = json.loads(arguments)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse streamed tool arguments: {e}")
continue
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
return self._finalize_streaming_response(
full_response=full_response,
tool_calls=tool_calls,
usage_data=usage_data,
params=params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return full_response
def supports_function_calling(self) -> bool:
"""Check if the model supports function calling."""
return not self.is_o1_model

View File

@@ -230,7 +230,7 @@ class TestDeployCommand(unittest.TestCase):
[project]
name = "test_project"
version = "0.1.0"
requires-python = ">=3.10,<3.14"
requires-python = ">=3.10,<3.15"
dependencies = ["crewai"]
""",
)
@@ -249,7 +249,7 @@ class TestDeployCommand(unittest.TestCase):
[project]
name = "test_project"
version = "0.1.0"
requires-python = ">=3.10,<3.14"
requires-python = ">=3.10,<3.15"
dependencies = ["crewai"]
""",
)

View File

@@ -1,6 +1,7 @@
import os
import sys
import types
from typing import Any
from unittest.mock import patch, MagicMock
import openai
import pytest
@@ -1578,3 +1579,167 @@ def test_openai_structured_output_preserves_json_with_stop_word_patterns():
assert "Action:" in result.action_taken
assert "Observation:" in result.observation_result
assert "Final Answer:" in result.final_answer
def test_openai_streaming_returns_tool_calls_without_available_functions():
"""Test that streaming returns tool calls list when available_functions is None.
This mirrors the non-streaming path where tool_calls are returned for
the executor to handle. Reproduces the bug where streaming with tool
calls would return empty text instead of tool_calls when
available_functions was not provided (as the crew executor does).
"""
llm = LLM(model="openai/gpt-4o-mini", stream=True)
mock_chunk_1 = MagicMock()
mock_chunk_1.choices = [MagicMock()]
mock_chunk_1.choices[0].delta = MagicMock()
mock_chunk_1.choices[0].delta.content = None
mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()]
mock_chunk_1.choices[0].delta.tool_calls[0].index = 0
mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123"
mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock()
mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator"
mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr'
mock_chunk_1.choices[0].finish_reason = None
mock_chunk_1.usage = None
mock_chunk_1.id = "chatcmpl-1"
mock_chunk_2 = MagicMock()
mock_chunk_2.choices = [MagicMock()]
mock_chunk_2.choices[0].delta = MagicMock()
mock_chunk_2.choices[0].delta.content = None
mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()]
mock_chunk_2.choices[0].delta.tool_calls[0].index = 0
mock_chunk_2.choices[0].delta.tool_calls[0].id = None
mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock()
mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None
mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}'
mock_chunk_2.choices[0].finish_reason = None
mock_chunk_2.usage = None
mock_chunk_2.id = "chatcmpl-1"
mock_chunk_3 = MagicMock()
mock_chunk_3.choices = [MagicMock()]
mock_chunk_3.choices[0].delta = MagicMock()
mock_chunk_3.choices[0].delta.content = None
mock_chunk_3.choices[0].delta.tool_calls = None
mock_chunk_3.choices[0].finish_reason = "tool_calls"
mock_chunk_3.usage = MagicMock()
mock_chunk_3.usage.prompt_tokens = 10
mock_chunk_3.usage.completion_tokens = 5
mock_chunk_3.id = "chatcmpl-1"
with patch.object(
llm.client.chat.completions, "create", return_value=iter([mock_chunk_1, mock_chunk_2, mock_chunk_3])
):
result = llm.call(
messages=[{"role": "user", "content": "Calculate 1+1"}],
tools=[{
"type": "function",
"function": {
"name": "calculator",
"description": "Calculate expression",
"parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
},
}],
available_functions=None,
)
assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
assert len(result) == 1
assert result[0]["function"]["name"] == "calculator"
assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
assert result[0]["id"] == "call_abc123"
assert result[0]["type"] == "function"
@pytest.mark.asyncio
async def test_openai_async_streaming_returns_tool_calls_without_available_functions():
"""Test that async streaming returns tool calls list when available_functions is None.
Same as the sync test but for the async path (_ahandle_streaming_completion).
"""
llm = LLM(model="openai/gpt-4o-mini", stream=True)
mock_chunk_1 = MagicMock()
mock_chunk_1.choices = [MagicMock()]
mock_chunk_1.choices[0].delta = MagicMock()
mock_chunk_1.choices[0].delta.content = None
mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()]
mock_chunk_1.choices[0].delta.tool_calls[0].index = 0
mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123"
mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock()
mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator"
mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr'
mock_chunk_1.choices[0].finish_reason = None
mock_chunk_1.usage = None
mock_chunk_1.id = "chatcmpl-1"
mock_chunk_2 = MagicMock()
mock_chunk_2.choices = [MagicMock()]
mock_chunk_2.choices[0].delta = MagicMock()
mock_chunk_2.choices[0].delta.content = None
mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()]
mock_chunk_2.choices[0].delta.tool_calls[0].index = 0
mock_chunk_2.choices[0].delta.tool_calls[0].id = None
mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock()
mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None
mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}'
mock_chunk_2.choices[0].finish_reason = None
mock_chunk_2.usage = None
mock_chunk_2.id = "chatcmpl-1"
mock_chunk_3 = MagicMock()
mock_chunk_3.choices = [MagicMock()]
mock_chunk_3.choices[0].delta = MagicMock()
mock_chunk_3.choices[0].delta.content = None
mock_chunk_3.choices[0].delta.tool_calls = None
mock_chunk_3.choices[0].finish_reason = "tool_calls"
mock_chunk_3.usage = MagicMock()
mock_chunk_3.usage.prompt_tokens = 10
mock_chunk_3.usage.completion_tokens = 5
mock_chunk_3.id = "chatcmpl-1"
class MockAsyncStream:
"""Async iterator that mimics OpenAI's async streaming response."""
def __init__(self, chunks: list[Any]) -> None:
self._chunks = chunks
self._index = 0
def __aiter__(self) -> "MockAsyncStream":
return self
async def __anext__(self) -> Any:
if self._index >= len(self._chunks):
raise StopAsyncIteration
chunk = self._chunks[self._index]
self._index += 1
return chunk
async def mock_create(**kwargs: Any) -> MockAsyncStream:
return MockAsyncStream([mock_chunk_1, mock_chunk_2, mock_chunk_3])
with patch.object(
llm.async_client.chat.completions, "create", side_effect=mock_create
):
result = await llm.acall(
messages=[{"role": "user", "content": "Calculate 1+1"}],
tools=[{
"type": "function",
"function": {
"name": "calculator",
"description": "Calculate expression",
"parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
},
}],
available_functions=None,
)
assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
assert len(result) == 1
assert result[0]["function"]["name"] == "calculator"
assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
assert result[0]["id"] == "call_abc123"
assert result[0]["type"] == "function"

View File

@@ -1,449 +0,0 @@
"""Tests for streaming tool call handling when available_functions is None.
Covers the fix for GitHub issue #4442: async streaming fails with tool/function calls
when available_functions is not provided (i.e., when the executor handles tool execution
instead of the LLM provider).
The fix ensures that streaming methods return accumulated tool calls in the correct
format instead of falling through and returning None/empty.
"""
import asyncio
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from crewai.llms.providers.openai.completion import OpenAICompletion
def _make_completion_chunk(
tool_call_index: int = 0,
tool_call_id: str | None = None,
function_name: str | None = None,
function_arguments: str | None = None,
content: str | None = None,
has_usage: bool = False,
) -> MagicMock:
"""Create a mock ChatCompletionChunk for streaming."""
chunk = MagicMock()
chunk.id = "chatcmpl-test123"
if has_usage:
chunk.usage = MagicMock(prompt_tokens=10, completion_tokens=5, total_tokens=15)
chunk.choices = []
return chunk
chunk.usage = None
choice = MagicMock()
delta = MagicMock()
delta.content = content
if function_name is not None or function_arguments is not None or tool_call_id is not None:
tc = MagicMock()
tc.index = tool_call_index
tc.id = tool_call_id
tc.function = MagicMock()
tc.function.name = function_name
tc.function.arguments = function_arguments
delta.tool_calls = [tc]
else:
delta.tool_calls = None
choice.delta = delta
chunk.choices = [choice]
return chunk
def _make_responses_events(
function_name: str = "get_temperature",
function_args: str = '{"city": "Paris"}',
call_id: str = "call_abc123",
) -> list[MagicMock]:
"""Create mock Responses API streaming events with a function call."""
created_event = MagicMock()
created_event.type = "response.created"
created_event.response = MagicMock(id="resp_test123")
args_delta_event = MagicMock()
args_delta_event.type = "response.function_call_arguments.delta"
item_done_event = MagicMock()
item_done_event.type = "response.output_item.done"
item_done_event.item = MagicMock()
item_done_event.item.type = "function_call"
item_done_event.item.call_id = call_id
item_done_event.item.name = function_name
item_done_event.item.arguments = function_args
completed_event = MagicMock()
completed_event.type = "response.completed"
completed_event.response = MagicMock()
completed_event.response.id = "resp_test123"
completed_event.response.usage = MagicMock(
input_tokens=10, output_tokens=5, total_tokens=15
)
return [created_event, args_delta_event, item_done_event, completed_event]
class TestStreamingCompletionToolCallsNoAvailableFunctions:
"""Tests for _handle_streaming_completion returning tool calls when available_functions is None."""
def test_streaming_completion_returns_tool_calls_when_no_available_functions(self):
"""When streaming with tool calls and available_functions=None,
the method should return formatted tool calls list."""
llm = OpenAICompletion(model="gpt-4o")
chunks = [
_make_completion_chunk(
tool_call_index=0,
tool_call_id="call_abc123",
function_name="get_temperature",
function_arguments="",
),
_make_completion_chunk(
tool_call_index=0,
function_arguments='{"city":',
),
_make_completion_chunk(
tool_call_index=0,
function_arguments=' "Paris"}',
),
_make_completion_chunk(has_usage=True),
]
mock_stream = MagicMock()
mock_stream.__iter__ = MagicMock(return_value=iter(chunks))
with patch.object(
llm.client.chat.completions, "create", return_value=mock_stream
):
result = llm._handle_streaming_completion(
params={"messages": [{"role": "user", "content": "test"}], "stream": True},
available_functions=None,
)
assert isinstance(result, list)
assert len(result) == 1
assert result[0]["type"] == "function"
assert result[0]["function"]["name"] == "get_temperature"
assert result[0]["function"]["arguments"] == '{"city": "Paris"}'
assert result[0]["id"] == "call_abc123"
def test_streaming_completion_multiple_tool_calls_no_available_functions(self):
"""When streaming with multiple tool calls and available_functions=None,
all tool calls should be returned."""
llm = OpenAICompletion(model="gpt-4o")
chunks = [
_make_completion_chunk(
tool_call_index=0,
tool_call_id="call_1",
function_name="get_temperature",
function_arguments='{"city": "Paris"}',
),
_make_completion_chunk(
tool_call_index=1,
tool_call_id="call_2",
function_name="get_temperature",
function_arguments='{"city": "London"}',
),
_make_completion_chunk(has_usage=True),
]
mock_stream = MagicMock()
mock_stream.__iter__ = MagicMock(return_value=iter(chunks))
with patch.object(
llm.client.chat.completions, "create", return_value=mock_stream
):
result = llm._handle_streaming_completion(
params={"messages": [{"role": "user", "content": "test"}], "stream": True},
available_functions=None,
)
assert isinstance(result, list)
assert len(result) == 2
assert result[0]["function"]["name"] == "get_temperature"
assert result[0]["function"]["arguments"] == '{"city": "Paris"}'
assert result[1]["function"]["name"] == "get_temperature"
assert result[1]["function"]["arguments"] == '{"city": "London"}'
def test_streaming_completion_with_available_functions_still_executes(self):
    """Providing available_functions keeps the existing execute-the-tool path."""
    llm = OpenAICompletion(model="gpt-4o")
    chunks = [
        _make_completion_chunk(
            tool_call_index=0,
            tool_call_id="call_abc",
            function_name="get_temperature",
            function_arguments='{"city": "Paris"}',
        ),
        _make_completion_chunk(has_usage=True),
    ]
    stream = MagicMock()
    stream.__iter__ = MagicMock(return_value=iter(chunks))
    create_patch = patch.object(
        llm.client.chat.completions, "create", return_value=stream
    )
    exec_patch = patch.object(
        llm, "_handle_tool_execution", return_value="72F in Paris"
    )
    with create_patch, exec_patch as mock_exec:
        result = llm._handle_streaming_completion(
            params={"messages": [{"role": "user", "content": "test"}], "stream": True},
            available_functions={"get_temperature": lambda city: f"72F in {city}"},
        )
    assert result == "72F in Paris"
    mock_exec.assert_called_once()
class TestAsyncStreamingCompletionToolCallsNoAvailableFunctions:
    """Tests for _ahandle_streaming_completion returning tool calls when available_functions is None."""

    @pytest.mark.asyncio
    async def test_async_streaming_completion_returns_tool_calls_when_no_available_functions(self):
        """Async streaming with tool-call chunks and available_functions=None
        should return the formatted tool-call list."""
        llm = OpenAICompletion(model="gpt-4o")
        # The arguments arrive fragmented over three chunks and must be reassembled.
        chunks = [
            _make_completion_chunk(
                tool_call_index=0,
                tool_call_id="call_abc123",
                function_name="get_temperature",
                function_arguments="",
            ),
            _make_completion_chunk(tool_call_index=0, function_arguments='{"city":'),
            _make_completion_chunk(tool_call_index=0, function_arguments=' "Paris"}'),
            _make_completion_chunk(has_usage=True),
        ]

        async def fake_stream():
            for chunk in chunks:
                yield chunk

        with patch.object(
            llm.async_client.chat.completions,
            "create",
            new_callable=AsyncMock,
            return_value=fake_stream(),
        ):
            result = await llm._ahandle_streaming_completion(
                params={"messages": [{"role": "user", "content": "test"}], "stream": True},
                available_functions=None,
            )
        assert isinstance(result, list)
        assert len(result) == 1
        tool_call = result[0]
        assert tool_call["type"] == "function"
        assert tool_call["function"]["name"] == "get_temperature"
        assert tool_call["function"]["arguments"] == '{"city": "Paris"}'
        assert tool_call["id"] == "call_abc123"

    @pytest.mark.asyncio
    async def test_async_streaming_completion_multiple_tool_calls_no_available_functions(self):
        """Async streaming with several tool calls and available_functions=None
        should surface every tool call in the returned list."""
        llm = OpenAICompletion(model="gpt-4o")
        call_specs = [
            (0, "call_1", '{"city": "Paris"}'),
            (1, "call_2", '{"city": "London"}'),
        ]
        chunks = [
            _make_completion_chunk(
                tool_call_index=index,
                tool_call_id=call_id,
                function_name="get_temperature",
                function_arguments=arguments,
            )
            for index, call_id, arguments in call_specs
        ]
        chunks.append(_make_completion_chunk(has_usage=True))

        async def fake_stream():
            for chunk in chunks:
                yield chunk

        with patch.object(
            llm.async_client.chat.completions,
            "create",
            new_callable=AsyncMock,
            return_value=fake_stream(),
        ):
            result = await llm._ahandle_streaming_completion(
                params={"messages": [{"role": "user", "content": "test"}], "stream": True},
                available_functions=None,
            )
        assert isinstance(result, list)
        assert len(result) == 2
        assert [call["function"]["name"] for call in result] == [
            "get_temperature",
            "get_temperature",
        ]
        assert result[0]["function"]["arguments"] == '{"city": "Paris"}'
        assert result[1]["function"]["arguments"] == '{"city": "London"}'
class TestStreamingResponsesToolCallsNoAvailableFunctions:
    """Tests for _handle_streaming_responses returning function calls when available_functions is None."""

    def test_streaming_responses_returns_function_calls_when_no_available_functions(self):
        """Streaming the Responses API with function calls and available_functions=None
        should return the function_calls list instead of executing anything."""
        llm = OpenAICompletion(model="gpt-4o", use_responses_api=True)
        events = _make_responses_events(
            function_name="get_temperature",
            function_args='{"city": "Paris"}',
            call_id="call_abc123",
        )
        stream = MagicMock()
        stream.__iter__ = MagicMock(return_value=iter(events))
        create_patch = patch.object(
            llm.client.responses, "create", return_value=stream
        )
        with create_patch:
            result = llm._handle_streaming_responses(
                params={"input": [{"role": "user", "content": "test"}], "model": "gpt-4o", "stream": True},
                available_functions=None,
            )
        assert isinstance(result, list)
        assert len(result) == 1
        function_call = result[0]
        assert function_call["name"] == "get_temperature"
        assert function_call["arguments"] == '{"city": "Paris"}'
        assert function_call["id"] == "call_abc123"

    def test_streaming_responses_with_available_functions_still_executes(self):
        """Providing available_functions keeps the existing execute-the-tool path."""
        llm = OpenAICompletion(model="gpt-4o", use_responses_api=True)
        events = _make_responses_events(
            function_name="get_temperature",
            function_args='{"city": "Paris"}',
        )
        stream = MagicMock()
        stream.__iter__ = MagicMock(return_value=iter(events))
        create_patch = patch.object(
            llm.client.responses, "create", return_value=stream
        )
        exec_patch = patch.object(
            llm, "_handle_tool_execution", return_value="72F in Paris"
        )
        with create_patch, exec_patch as mock_exec:
            result = llm._handle_streaming_responses(
                params={"input": [{"role": "user", "content": "test"}], "model": "gpt-4o", "stream": True},
                available_functions={"get_temperature": lambda city: f"72F in {city}"},
            )
        assert result == "72F in Paris"
        mock_exec.assert_called_once()
class TestAsyncStreamingResponsesToolCallsNoAvailableFunctions:
    """Tests for _ahandle_streaming_responses returning function calls when available_functions is None."""

    @pytest.mark.asyncio
    async def test_async_streaming_responses_returns_function_calls_when_no_available_functions(self):
        """Async streaming the Responses API with function calls and available_functions=None
        should return the function_calls list."""
        llm = OpenAICompletion(model="gpt-4o", use_responses_api=True)
        events = _make_responses_events(
            function_name="get_temperature",
            function_args='{"city": "Paris"}',
            call_id="call_abc123",
        )

        async def event_stream():
            for event in events:
                yield event

        with patch.object(
            llm.async_client.responses,
            "create",
            new_callable=AsyncMock,
            return_value=event_stream(),
        ):
            result = await llm._ahandle_streaming_responses(
                params={"input": [{"role": "user", "content": "test"}], "model": "gpt-4o", "stream": True},
                available_functions=None,
            )
        assert isinstance(result, list)
        assert len(result) == 1
        function_call = result[0]
        assert function_call["name"] == "get_temperature"
        assert function_call["arguments"] == '{"city": "Paris"}'
        assert function_call["id"] == "call_abc123"

    @pytest.mark.asyncio
    async def test_async_streaming_responses_multiple_function_calls_no_available_functions(self):
        """Async streaming the Responses API with multiple function calls and
        available_functions=None should return every function call."""
        llm = OpenAICompletion(model="gpt-4o", use_responses_api=True)

        def output_item_done(call_id, arguments):
            # Build one response.output_item.done event carrying a function call.
            event = MagicMock()
            event.type = "response.output_item.done"
            event.item = MagicMock()
            event.item.type = "function_call"
            event.item.call_id = call_id
            event.item.name = "get_temperature"
            event.item.arguments = arguments
            return event

        created = MagicMock()
        created.type = "response.created"
        created.response = MagicMock(id="resp_test")
        finished = MagicMock()
        finished.type = "response.completed"
        finished.response = MagicMock()
        finished.response.id = "resp_test"
        finished.response.usage = MagicMock(
            input_tokens=10, output_tokens=5, total_tokens=15
        )
        events = [
            created,
            output_item_done("call_1", '{"city": "Paris"}'),
            output_item_done("call_2", '{"city": "London"}'),
            finished,
        ]

        async def event_stream():
            for event in events:
                yield event

        with patch.object(
            llm.async_client.responses,
            "create",
            new_callable=AsyncMock,
            return_value=event_stream(),
        ):
            result = await llm._ahandle_streaming_responses(
                params={"input": [{"role": "user", "content": "test"}], "model": "gpt-4o", "stream": True},
                available_functions=None,
            )
        assert isinstance(result, list)
        assert len(result) == 2
        assert [call["name"] for call in result] == [
            "get_temperature",
            "get_temperature",
        ]
        assert result[0]["arguments"] == '{"city": "Paris"}'
        assert result[1]["arguments"] == '{"city": "London"}'

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [
{ name = "Greyson R. LaLonde", email = "greyson@crewai.com" },
]
requires-python = ">=3.10, <3.14"
requires-python = ">=3.10,<3.15"
classifiers = ["Private :: Do Not Upload"]
private = true
dependencies = [

View File

@@ -1,7 +1,7 @@
name = "crewai-workspace"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<3.14"
requires-python = ">=3.10,<3.15"
authors = [
{ name = "Joao Moura", email = "joao@crewai.com" }
]
@@ -143,6 +143,11 @@ python_classes = "Test*"
python_functions = "test_*"
[tool.uv]
constraint-dependencies = [
"onnxruntime<1.24; python_version < '3.11'",
]
[tool.uv.workspace]
members = [
"lib/crewai",

4635
uv.lock generated

File diff suppressed because it is too large Load Diff