Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-02-12 00:48:15 +00:00

Compare commits: alert-auto...gl/feat/al (8 commits)

- 2652dfecb3
- a71e5db75f
- 3963bb3986
- 82bf8965b0
- c4c35502ba
- 13478b87ec
- 3a22e80764
- 9b585a934d
.github/ISSUE_TEMPLATE/bug_report.yml (2 changes, vendored)

@@ -65,6 +65,8 @@ body:
         - '3.10'
         - '3.11'
         - '3.12'
+        - '3.13'
+        - '3.14'
     validations:
       required: true
   - type: input
.github/workflows/build-uv-cache.yml (6 changes, vendored)

@@ -19,7 +19,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.10", "3.11", "3.12", "3.13"]
+        python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]

     steps:
       - name: Checkout repository
@@ -32,6 +32,10 @@ jobs:
           python-version: ${{ matrix.python-version }}
           enable-cache: false

+      - name: Install system build dependencies
+        if: matrix.python-version == '3.14'
+        run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
+
       - name: Install dependencies and populate cache
         run: |
           echo "Building global UV cache for Python ${{ matrix.python-version }}..."
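The conditional apt step above exists because, early in a CPython release's life, dependencies such as lxml and PyAV often publish no prebuilt cp314 wheels, so pip falls back to building from source against the system libxml2/FFmpeg/libheif headers. Whether a wheel is usable at all is decided by the interpreter's supported tags; a diagnostic sketch using the `packaging` library (an illustration, not part of the workflow):

```python
# Diagnostic sketch: list the wheel tags this interpreter accepts.
# If a dependency publishes no matching tag (e.g. no cp314 wheels yet),
# pip builds the sdist from source, which needs the -dev headers above.
from packaging.tags import sys_tags

for tag in list(sys_tags())[:5]:
    print(tag)  # e.g. cp314-cp314-manylinux_2_35_x86_64
```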
.github/workflows/linter.yml (8 changes, vendored)

@@ -26,15 +26,15 @@ jobs:
             ~/.cache/uv
             ~/.local/share/uv
             .venv
-          key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
+          key: uv-main-py3.12-${{ hashFiles('uv.lock') }}
           restore-keys: |
-            uv-main-py3.11-
+            uv-main-py3.12-

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
-          python-version: "3.11"
+          python-version: "3.12"
          enable-cache: false

      - name: Install dependencies
@@ -66,4 +66,4 @@ jobs:
             ~/.cache/uv
             ~/.local/share/uv
             .venv
-          key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
+          key: uv-main-py3.12-${{ hashFiles('uv.lock') }}
.github/workflows/tests.yml (16 changes, vendored)

@@ -13,8 +13,8 @@ jobs:
     strategy:
       fail-fast: true
       matrix:
-        python-version: ['3.10', '3.11', '3.12', '3.13']
-        group: [1, 2, 3, 4, 5, 6, 7, 8]
+        python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
+        group: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -40,6 +40,10 @@ jobs:
           python-version: ${{ matrix.python-version }}
           enable-cache: false

+      - name: Install system build dependencies
+        if: matrix.python-version == '3.14'
+        run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
+
       - name: Install the project
         run: uv sync --all-groups --all-extras

@@ -49,7 +53,7 @@ jobs:
           path: .test_durations_py*
           key: test-durations-py${{ matrix.python-version }}

-      - name: Run tests (group ${{ matrix.group }} of 8)
+      - name: Run tests (group ${{ matrix.group }} of 16)
        run: |
          PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
          DURATION_FILE="../../.test_durations_py${PYTHON_VERSION_SAFE}"
@@ -73,17 +77,17 @@ jobs:

          cd lib/crewai && uv run pytest \
            -vv \
-            --splits 8 \
+            --splits 16 \
            --group ${{ matrix.group }} \
            $DURATIONS_ARG \
            --durations=10 \
            --maxfail=3

-      - name: Run tool tests (group ${{ matrix.group }} of 8)
+      - name: Run tool tests (group ${{ matrix.group }} of 16)
        run: |
          cd lib/crewai-tools && uv run pytest \
            -vv \
-            --splits 8 \
+            --splits 16 \
            --group ${{ matrix.group }} \
            --durations=10 \
            --maxfail=3
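Doubling the matrix from 8 to 16 groups halves each shard's share of the suite: `--splits` declares the total group count, `--group` selects this job's slice, and the cached duration file keeps shards balanced by recorded runtime rather than by test count. A minimal sketch of duration-balanced sharding in that spirit (an illustration, not pytest-split's actual implementation):

```python
# Hedged sketch of duration-balanced test sharding in the spirit of
# pytest-split's --splits/--group flags; not the plugin's real algorithm.
import heapq

def shard(durations: dict[str, float], splits: int) -> dict[int, list[str]]:
    """Greedily assign the longest tests to the currently lightest group."""
    groups: dict[int, list[str]] = {g: [] for g in range(1, splits + 1)}
    load = [(0.0, g) for g in groups]  # (accumulated seconds, group id)
    heapq.heapify(load)
    for test, seconds in sorted(durations.items(), key=lambda kv: -kv[1]):
        total, g = heapq.heappop(load)
        groups[g].append(test)
        heapq.heappush(load, (total + seconds, g))
    return groups

# Example: three recorded durations spread across 16 groups, heaviest first.
print(shard({"test_slow": 30.0, "test_mid": 12.0, "test_fast": 1.0}, splits=16)[1])
```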
.github/workflows/type-checker.yml (6 changes, vendored)

@@ -12,7 +12,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.10", "3.11", "3.12", "3.13"]
+        python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]

     steps:
       - name: Checkout code
@@ -39,6 +39,10 @@ jobs:
           python-version: ${{ matrix.python-version }}
           enable-cache: false

+      - name: Install system build dependencies
+        if: matrix.python-version == '3.14'
+        run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
+
       - name: Install dependencies
         run: uv sync --all-groups --all-extras
.github/workflows/update-test-durations.yml (10 changes, vendored)

@@ -16,11 +16,11 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ['3.10', '3.11', '3.12', '3.13']
+        python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
     env:
       OPENAI_API_KEY: fake-api-key
       PYTHONUNBUFFERED: 1

     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
@@ -44,6 +44,10 @@ jobs:
           python-version: ${{ matrix.python-version }}
           enable-cache: false

+      - name: Install system build dependencies
+        if: matrix.python-version == '3.14'
+        run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
+
       - name: Install the project
         run: uv sync --all-groups --all-extras

@@ -68,4 +72,4 @@ jobs:
             ~/.cache/uv
             ~/.local/share/uv
             .venv
-          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
+          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
@@ -6,7 +6,7 @@ readme = "README.md"
 authors = [
     { name = "Greyson LaLonde", email = "greyson@crewai.com" }
 ]
-requires-python = ">=3.10, <3.14"
+requires-python = ">=3.10,<3.15"
 dependencies = [
     "Pillow~=10.4.0",
     "pypdf~=4.0.0",
@@ -6,7 +6,7 @@ readme = "README.md"
 authors = [
     { name = "João Moura", email = "joaomdmoura@gmail.com" },
 ]
-requires-python = ">=3.10, <3.14"
+requires-python = ">=3.10,<3.15"
 dependencies = [
     "lancedb~=0.5.4",
     "pytube~=15.0.0",
@@ -118,7 +118,7 @@ rag = [
     "lxml>=5.3.0,<5.4.0", # Pin to avoid etree import issues in 5.4.0
 ]
 xml = [
-    "unstructured[local-inference, all-docs]>=0.17.2"
+    "unstructured[local-inference, all-docs]>=0.17.2,<0.18.31"
 ]
 oxylabs = [
     "oxylabs==2.0.0"
@@ -6,7 +6,7 @@ readme = "README.md"
 authors = [
     { name = "Joao Moura", email = "joao@crewai.com" }
 ]
-requires-python = ">=3.10, <3.14"
+requires-python = ">=3.10,<3.15"
 dependencies = [
     # Core Dependencies
     "pydantic~=2.11.9",
@@ -3,7 +3,7 @@ name = "{{folder_name}}"
 version = "0.1.0"
 description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
-requires-python = ">=3.10,<3.14"
+requires-python = ">=3.10,<3.15"
 dependencies = [
     "crewai[tools]==1.9.3"
 ]

@@ -3,7 +3,7 @@ name = "{{folder_name}}"
 version = "0.1.0"
 description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
-requires-python = ">=3.10,<3.14"
+requires-python = ">=3.10,<3.15"
 dependencies = [
     "crewai[tools]==1.9.3"
 ]
@@ -3,9 +3,9 @@ name = "{{folder_name}}"
 version = "0.1.0"
 description = "Power up your crews with {{folder_name}}"
 readme = "README.md"
-requires-python = ">=3.10,<3.14"
+requires-python = ">=3.10,<3.15"
 dependencies = [
-    "crewai[tools]>=0.203.1"
+    "crewai[tools]==1.9.3"
 ]

 [tool.crewai]
@@ -187,6 +187,7 @@ class Crew(FlowTrackable, BaseModel):
     _task_output_handler: TaskOutputStorageHandler = PrivateAttr(
         default_factory=TaskOutputStorageHandler
     )
+    _kickoff_event_id: str | None = PrivateAttr(default=None)

     name: str | None = Field(default="crew")
     cache: bool = Field(default=True)
@@ -759,7 +760,11 @@ class Crew(FlowTrackable, BaseModel):
         except Exception as e:
             crewai_event_bus.emit(
                 self,
-                CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
+                CrewKickoffFailedEvent(
+                    error=str(e),
+                    crew_name=self.name,
+                    started_event_id=self._kickoff_event_id,
+                ),
             )
             raise
         finally:
@@ -949,7 +954,11 @@ class Crew(FlowTrackable, BaseModel):
         except Exception as e:
             crewai_event_bus.emit(
                 self,
-                CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
+                CrewKickoffFailedEvent(
+                    error=str(e),
+                    crew_name=self.name,
+                    started_event_id=self._kickoff_event_id,
+                ),
             )
             raise
         finally:
@@ -1524,6 +1533,7 @@ class Crew(FlowTrackable, BaseModel):
                     crew_name=self.name,
                     output=final_task_output,
                     total_tokens=self.token_usage.total_tokens,
+                    started_event_id=self._kickoff_event_id,
                 ),
             )
@@ -265,10 +265,9 @@ def prepare_kickoff(
         normalized = {}
     normalized = before_callback(normalized)

-    future = crewai_event_bus.emit(
-        crew,
-        CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized),
-    )
+    started_event = CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized)
+    crew._kickoff_event_id = started_event.event_id
+    future = crewai_event_bus.emit(crew, started_event)
     if future is not None:
         try:
             future.result()
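Stashing the started event's `event_id` on the crew lets the failed and completed events carry a `started_event_id` back-reference, so consumers can join the three lifecycle events into a single kickoff record. A sketch of what that enables (handler shape follows crewAI's event-bus decorator; the import path and handler names here are illustrative and may vary by version):

```python
# Illustrative consumer that correlates kickoff lifecycle events by id.
# Import paths may differ across crewai versions; field names follow the diff.
import time

from crewai.events import CrewKickoffFailedEvent, CrewKickoffStartedEvent, crewai_event_bus

started_at: dict[str, float] = {}

@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_started(source, event):
    started_at[event.event_id] = time.monotonic()

@crewai_event_bus.on(CrewKickoffFailedEvent)
def on_failed(source, event):
    t0 = started_at.pop(event.started_event_id, None)
    if t0 is not None:
        print(f"kickoff failed after {time.monotonic() - t0:.2f}s: {event.error}")
```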
@@ -1696,6 +1696,99 @@ class OpenAICompletion(BaseLLM):

         return content

+    def _finalize_streaming_response(
+        self,
+        full_response: str,
+        tool_calls: dict[int, dict[str, Any]],
+        usage_data: dict[str, int],
+        params: dict[str, Any],
+        available_functions: dict[str, Any] | None = None,
+        from_task: Any | None = None,
+        from_agent: Any | None = None,
+    ) -> str | list[dict[str, Any]]:
+        """Finalize a streaming response with usage tracking, tool call handling, and events.
+
+        Args:
+            full_response: The accumulated text response from the stream.
+            tool_calls: Accumulated tool calls from the stream, keyed by index.
+            usage_data: Token usage data from the stream.
+            params: The completion parameters containing messages.
+            available_functions: Available functions for tool calling.
+            from_task: Task that initiated the call.
+            from_agent: Agent that initiated the call.
+
+        Returns:
+            Tool calls list when tools were invoked without available_functions,
+            tool execution result when available_functions is provided,
+            or the text response string.
+        """
+        self._track_token_usage_internal(usage_data)
+
+        if tool_calls and not available_functions:
+            tool_calls_list = [
+                {
+                    "id": call_data["id"],
+                    "type": "function",
+                    "function": {
+                        "name": call_data["name"],
+                        "arguments": call_data["arguments"],
+                    },
+                    "index": call_data["index"],
+                }
+                for call_data in tool_calls.values()
+            ]
+            self._emit_call_completed_event(
+                response=tool_calls_list,
+                call_type=LLMCallType.TOOL_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=params["messages"],
+            )
+            return tool_calls_list
+
+        if tool_calls and available_functions:
+            for call_data in tool_calls.values():
+                function_name = call_data["name"]
+                arguments = call_data["arguments"]
+
+                if not function_name or not arguments:
+                    continue
+
+                if function_name not in available_functions:
+                    logging.warning(
+                        f"Function '{function_name}' not found in available functions"
+                    )
+                    continue
+
+                try:
+                    function_args = json.loads(arguments)
+                except json.JSONDecodeError as e:
+                    logging.error(f"Failed to parse streamed tool arguments: {e}")
+                    continue
+
+                result = self._handle_tool_execution(
+                    function_name=function_name,
+                    function_args=function_args,
+                    available_functions=available_functions,
+                    from_task=from_task,
+                    from_agent=from_agent,
+                )
+
+                if result is not None:
+                    return result
+
+        full_response = self._apply_stop_words(full_response)
+
+        self._emit_call_completed_event(
+            response=full_response,
+            call_type=LLMCallType.LLM_CALL,
+            from_task=from_task,
+            from_agent=from_agent,
+            messages=params["messages"],
+        )
+
+        return full_response
+
     def _handle_streaming_completion(
         self,
         params: dict[str, Any],
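The `tool_calls: dict[int, dict[str, Any]]` parameter reflects how streamed tool calls arrive: the first delta for a given index carries the call id and function name, and later deltas append argument fragments that only parse as JSON once the stream ends — exactly the chunk shapes the new tests below construct. A minimal sketch of that accumulation step, assuming OpenAI-style delta objects (an illustration, not the body of `_handle_streaming_completion`):

```python
# Sketch: fold OpenAI-style streamed deltas into the dict[int, dict[str, Any]]
# shape that _finalize_streaming_response consumes. Illustrative only.
from typing import Any

def accumulate_tool_calls(
    tool_calls: dict[int, dict[str, Any]], deltas: list[Any] | None
) -> None:
    for delta in deltas or []:
        entry = tool_calls.setdefault(
            delta.index,
            {"id": None, "name": "", "arguments": "", "index": delta.index},
        )
        if delta.id:  # only the first chunk for an index carries the call id
            entry["id"] = delta.id
        if delta.function.name:  # likewise the function name
            entry["name"] = delta.function.name
        if delta.function.arguments:  # argument JSON arrives in fragments
            entry["arguments"] += delta.function.arguments
```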
@@ -1703,7 +1796,7 @@ class OpenAICompletion(BaseLLM):
         from_task: Any | None = None,
         from_agent: Any | None = None,
         response_model: type[BaseModel] | None = None,
-    ) -> str | BaseModel:
+    ) -> str | list[dict[str, Any]] | BaseModel:
         """Handle streaming chat completion."""
         full_response = ""
         tool_calls: dict[int, dict[str, Any]] = {}
@@ -1820,54 +1913,20 @@ class OpenAICompletion(BaseLLM):
                 response_id=response_id_stream,
             )

-        self._track_token_usage_internal(usage_data)
-
-        if tool_calls and available_functions:
-            for call_data in tool_calls.values():
-                function_name = call_data["name"]
-                arguments = call_data["arguments"]
-
-                # Skip if function name is empty or arguments are empty
-                if not function_name or not arguments:
-                    continue
-
-                # Check if function exists in available functions
-                if function_name not in available_functions:
-                    logging.warning(
-                        f"Function '{function_name}' not found in available functions"
-                    )
-                    continue
-
-                try:
-                    function_args = json.loads(arguments)
-                except json.JSONDecodeError as e:
-                    logging.error(f"Failed to parse streamed tool arguments: {e}")
-                    continue
-
-                result = self._handle_tool_execution(
-                    function_name=function_name,
-                    function_args=function_args,
-                    available_functions=available_functions,
-                    from_task=from_task,
-                    from_agent=from_agent,
-                )
-
-                if result is not None:
-                    return result
-
-        full_response = self._apply_stop_words(full_response)
-
-        self._emit_call_completed_event(
-            response=full_response,
-            call_type=LLMCallType.LLM_CALL,
+        result = self._finalize_streaming_response(
+            full_response=full_response,
+            tool_calls=tool_calls,
+            usage_data=usage_data,
+            params=params,
+            available_functions=available_functions,
             from_task=from_task,
             from_agent=from_agent,
-            messages=params["messages"],
         )
-
-        return self._invoke_after_llm_call_hooks(
-            params["messages"], full_response, from_agent
-        )
+        if isinstance(result, str):
+            return self._invoke_after_llm_call_hooks(
+                params["messages"], result, from_agent
+            )
+        return result

     async def _ahandle_completion(
         self,
@@ -2016,7 +2075,7 @@ class OpenAICompletion(BaseLLM):
         from_task: Any | None = None,
         from_agent: Any | None = None,
         response_model: type[BaseModel] | None = None,
-    ) -> str | BaseModel:
+    ) -> str | list[dict[str, Any]] | BaseModel:
         """Handle async streaming chat completion."""
         full_response = ""
         tool_calls: dict[int, dict[str, Any]] = {}
@@ -2142,51 +2201,16 @@ class OpenAICompletion(BaseLLM):
                 response_id=response_id_stream,
             )

-        self._track_token_usage_internal(usage_data)
-
-        if tool_calls and available_functions:
-            for call_data in tool_calls.values():
-                function_name = call_data["name"]
-                arguments = call_data["arguments"]
-
-                if not function_name or not arguments:
-                    continue
-
-                if function_name not in available_functions:
-                    logging.warning(
-                        f"Function '{function_name}' not found in available functions"
-                    )
-                    continue
-
-                try:
-                    function_args = json.loads(arguments)
-                except json.JSONDecodeError as e:
-                    logging.error(f"Failed to parse streamed tool arguments: {e}")
-                    continue
-
-                result = self._handle_tool_execution(
-                    function_name=function_name,
-                    function_args=function_args,
-                    available_functions=available_functions,
-                    from_task=from_task,
-                    from_agent=from_agent,
-                )
-
-                if result is not None:
-                    return result
-
-        full_response = self._apply_stop_words(full_response)
-
-        self._emit_call_completed_event(
-            response=full_response,
-            call_type=LLMCallType.LLM_CALL,
+        return self._finalize_streaming_response(
+            full_response=full_response,
+            tool_calls=tool_calls,
+            usage_data=usage_data,
+            params=params,
+            available_functions=available_functions,
             from_task=from_task,
             from_agent=from_agent,
-            messages=params["messages"],
         )

-        return full_response
-
     def supports_function_calling(self) -> bool:
         """Check if the model supports function calling."""
         return not self.is_o1_model
@@ -230,7 +230,7 @@ class TestDeployCommand(unittest.TestCase):
             [project]
             name = "test_project"
             version = "0.1.0"
-            requires-python = ">=3.10,<3.14"
+            requires-python = ">=3.10,<3.15"
             dependencies = ["crewai"]
             """,
         )
@@ -249,7 +249,7 @@ class TestDeployCommand(unittest.TestCase):
             [project]
             name = "test_project"
             version = "0.1.0"
-            requires-python = ">=3.10,<3.14"
+            requires-python = ">=3.10,<3.15"
             dependencies = ["crewai"]
             """,
         )
@@ -1,6 +1,7 @@
 import os
 import sys
 import types
+from typing import Any
 from unittest.mock import patch, MagicMock
 import openai
 import pytest
@@ -1578,3 +1579,167 @@ def test_openai_structured_output_preserves_json_with_stop_word_patterns():
     assert "Action:" in result.action_taken
     assert "Observation:" in result.observation_result
     assert "Final Answer:" in result.final_answer
+
+
+def test_openai_streaming_returns_tool_calls_without_available_functions():
+    """Test that streaming returns tool calls list when available_functions is None.
+
+    This mirrors the non-streaming path where tool_calls are returned for
+    the executor to handle. Reproduces the bug where streaming with tool
+    calls would return empty text instead of tool_calls when
+    available_functions was not provided (as the crew executor does).
+    """
+    llm = LLM(model="openai/gpt-4o-mini", stream=True)
+
+    mock_chunk_1 = MagicMock()
+    mock_chunk_1.choices = [MagicMock()]
+    mock_chunk_1.choices[0].delta = MagicMock()
+    mock_chunk_1.choices[0].delta.content = None
+    mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()]
+    mock_chunk_1.choices[0].delta.tool_calls[0].index = 0
+    mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123"
+    mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock()
+    mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator"
+    mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr'
+    mock_chunk_1.choices[0].finish_reason = None
+    mock_chunk_1.usage = None
+    mock_chunk_1.id = "chatcmpl-1"
+
+    mock_chunk_2 = MagicMock()
+    mock_chunk_2.choices = [MagicMock()]
+    mock_chunk_2.choices[0].delta = MagicMock()
+    mock_chunk_2.choices[0].delta.content = None
+    mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()]
+    mock_chunk_2.choices[0].delta.tool_calls[0].index = 0
+    mock_chunk_2.choices[0].delta.tool_calls[0].id = None
+    mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock()
+    mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None
+    mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}'
+    mock_chunk_2.choices[0].finish_reason = None
+    mock_chunk_2.usage = None
+    mock_chunk_2.id = "chatcmpl-1"
+
+    mock_chunk_3 = MagicMock()
+    mock_chunk_3.choices = [MagicMock()]
+    mock_chunk_3.choices[0].delta = MagicMock()
+    mock_chunk_3.choices[0].delta.content = None
+    mock_chunk_3.choices[0].delta.tool_calls = None
+    mock_chunk_3.choices[0].finish_reason = "tool_calls"
+    mock_chunk_3.usage = MagicMock()
+    mock_chunk_3.usage.prompt_tokens = 10
+    mock_chunk_3.usage.completion_tokens = 5
+    mock_chunk_3.id = "chatcmpl-1"
+
+    with patch.object(
+        llm.client.chat.completions, "create", return_value=iter([mock_chunk_1, mock_chunk_2, mock_chunk_3])
+    ):
+        result = llm.call(
+            messages=[{"role": "user", "content": "Calculate 1+1"}],
+            tools=[{
+                "type": "function",
+                "function": {
+                    "name": "calculator",
+                    "description": "Calculate expression",
+                    "parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
+                },
+            }],
+            available_functions=None,
+        )
+
+    assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
+    assert len(result) == 1
+    assert result[0]["function"]["name"] == "calculator"
+    assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
+    assert result[0]["id"] == "call_abc123"
+    assert result[0]["type"] == "function"
+
+
+@pytest.mark.asyncio
+async def test_openai_async_streaming_returns_tool_calls_without_available_functions():
+    """Test that async streaming returns tool calls list when available_functions is None.
+
+    Same as the sync test but for the async path (_ahandle_streaming_completion).
+    """
+    llm = LLM(model="openai/gpt-4o-mini", stream=True)
+
+    mock_chunk_1 = MagicMock()
+    mock_chunk_1.choices = [MagicMock()]
+    mock_chunk_1.choices[0].delta = MagicMock()
+    mock_chunk_1.choices[0].delta.content = None
+    mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()]
+    mock_chunk_1.choices[0].delta.tool_calls[0].index = 0
+    mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123"
+    mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock()
+    mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator"
+    mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr'
+    mock_chunk_1.choices[0].finish_reason = None
+    mock_chunk_1.usage = None
+    mock_chunk_1.id = "chatcmpl-1"
+
+    mock_chunk_2 = MagicMock()
+    mock_chunk_2.choices = [MagicMock()]
+    mock_chunk_2.choices[0].delta = MagicMock()
+    mock_chunk_2.choices[0].delta.content = None
+    mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()]
+    mock_chunk_2.choices[0].delta.tool_calls[0].index = 0
+    mock_chunk_2.choices[0].delta.tool_calls[0].id = None
+    mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock()
+    mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None
+    mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}'
+    mock_chunk_2.choices[0].finish_reason = None
+    mock_chunk_2.usage = None
+    mock_chunk_2.id = "chatcmpl-1"
+
+    mock_chunk_3 = MagicMock()
+    mock_chunk_3.choices = [MagicMock()]
+    mock_chunk_3.choices[0].delta = MagicMock()
+    mock_chunk_3.choices[0].delta.content = None
+    mock_chunk_3.choices[0].delta.tool_calls = None
+    mock_chunk_3.choices[0].finish_reason = "tool_calls"
+    mock_chunk_3.usage = MagicMock()
+    mock_chunk_3.usage.prompt_tokens = 10
+    mock_chunk_3.usage.completion_tokens = 5
+    mock_chunk_3.id = "chatcmpl-1"
+
+    class MockAsyncStream:
+        """Async iterator that mimics OpenAI's async streaming response."""
+
+        def __init__(self, chunks: list[Any]) -> None:
+            self._chunks = chunks
+            self._index = 0
+
+        def __aiter__(self) -> "MockAsyncStream":
+            return self
+
+        async def __anext__(self) -> Any:
+            if self._index >= len(self._chunks):
+                raise StopAsyncIteration
+            chunk = self._chunks[self._index]
+            self._index += 1
+            return chunk
+
+    async def mock_create(**kwargs: Any) -> MockAsyncStream:
+        return MockAsyncStream([mock_chunk_1, mock_chunk_2, mock_chunk_3])
+
+    with patch.object(
+        llm.async_client.chat.completions, "create", side_effect=mock_create
+    ):
+        result = await llm.acall(
+            messages=[{"role": "user", "content": "Calculate 1+1"}],
+            tools=[{
+                "type": "function",
+                "function": {
+                    "name": "calculator",
+                    "description": "Calculate expression",
+                    "parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
+                },
+            }],
+            available_functions=None,
+        )
+
+    assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
+    assert len(result) == 1
+    assert result[0]["function"]["name"] == "calculator"
+    assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
+    assert result[0]["id"] == "call_abc123"
+    assert result[0]["type"] == "function"
@@ -6,7 +6,7 @@ readme = "README.md"
 authors = [
     { name = "Greyson R. LaLonde", email = "greyson@crewai.com" },
 ]
-requires-python = ">=3.10, <3.14"
+requires-python = ">=3.10,<3.15"
 classifiers = ["Private :: Do Not Upload"]
 private = true
 dependencies = [
@@ -1,7 +1,7 @@
 name = "crewai-workspace"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 readme = "README.md"
-requires-python = ">=3.10,<3.14"
+requires-python = ">=3.10,<3.15"
 authors = [
     { name = "Joao Moura", email = "joao@crewai.com" }
 ]
@@ -143,6 +143,11 @@ python_classes = "Test*"
 python_functions = "test_*"


+[tool.uv]
+constraint-dependencies = [
+    "onnxruntime<1.24; python_version < '3.11'",
+]
+
 [tool.uv.workspace]
 members = [
     "lib/crewai",
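The recurring `<3.14` to `<3.15` bump is what actually admits CPython 3.14: `requires-python` is evaluated as a PEP 440 specifier set, so the old upper bound rejects every 3.14.x release. A quick check with the `packaging` library:

```python
# requires-python is a PEP 440 specifier set; the upper bound decides 3.14 support.
from packaging.specifiers import SpecifierSet

old = SpecifierSet(">=3.10,<3.14")
new = SpecifierSet(">=3.10,<3.15")
print("3.14.0" in old)  # False: excluded before this change
print("3.14.0" in new)  # True: admitted after the bump
```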