Compare commits

..

1 Commit

Author: Devin AI
SHA1: 2e0a646cad
Message: fix: broken Task Attributes table in docs (#4403)
The Guardrails row in the Task Attributes table had an unescaped pipe
character in the type column (Optional[List[Callable] | List[str]]), which
was interpreted as a column delimiter, breaking the table rendering.

Fixed by escaping the pipe with a backslash and removing the extra column
from the header separator row.

Added tests to validate markdown table structure across all doc languages.

Co-Authored-By: João <joao@crewai.com>
Date: 2026-02-07 08:34:22 +00:00
25 changed files with 2125 additions and 3190 deletions
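
To make the failure mode concrete, here is a minimal sketch (not part of the change set; the row strings are shortened stand-ins for the real rows) showing how splitting a table row on unescaped pipes, the same regex approach the new doc tests use, yields an extra cell for the broken Guardrails row and the expected four cells once the pipe is escaped with a backslash.

```python
import re

def split_row(row: str) -> list[str]:
    """Split a markdown table row on pipes that are not escaped with a backslash."""
    cells = re.split(r"(?<!\\)\|", row)
    # Drop the empty leading/trailing cells produced by the outer pipes.
    return [cell.replace("\\|", "|").strip() for cell in cells[1:-1]]

broken = "| **Guardrails** | `guardrails` | `Optional[List[Callable] | List[str]]` | List of guardrails. |"
fixed  = "| **Guardrails** | `guardrails` | `Optional[List[Callable] \\| List[str]]` | List of guardrails. |"

print(len(split_row(broken)))  # 5 - the unescaped pipe in the type cell is read as a column delimiter
print(len(split_row(fixed)))   # 4 - the escaped pipe stays inside the type cell as a literal '|'
```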

View File

@@ -65,8 +65,6 @@ body:
- '3.10'
- '3.11'
- '3.12'
- '3.13'
- '3.14'
validations:
required: true
- type: input

View File

@@ -14,18 +14,13 @@ paths-ignore:
- "lib/crewai/src/crewai/experimental/a2a/**"
paths:
# Include GitHub Actions workflows/composite actions for CodeQL actions analysis
- ".github/workflows/**"
- ".github/actions/**"
# Include all Python source code from workspace packages
- "lib/crewai/src/**"
- "lib/crewai-tools/src/**"
- "lib/crewai-files/src/**"
- "lib/devtools/src/**"
# Include tests (but exclude cassettes via paths-ignore)
- "lib/crewai/tests/**"
- "lib/crewai-tools/tests/**"
- "lib/crewai-files/tests/**"
- "lib/devtools/tests/**"
# Configure specific queries or packs if needed

View File

@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
python-version: ["3.10", "3.11", "3.12", "3.13"]
steps:
- name: Checkout repository
@@ -32,10 +32,6 @@ jobs:
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install dependencies and populate cache
run: |
echo "Building global UV cache for Python ${{ matrix.python-version }}..."

View File

@@ -69,7 +69,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v4
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
@@ -98,6 +98,6 @@ jobs:
exit 1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v4
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

View File

@@ -26,15 +26,15 @@ jobs:
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py3.12-${{ hashFiles('uv.lock') }}
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
restore-keys: |
uv-main-py3.12-
uv-main-py3.11-
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
version: "0.8.4"
python-version: "3.12"
python-version: "3.11"
enable-cache: false
- name: Install dependencies
@@ -66,4 +66,4 @@ jobs:
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py3.12-${{ hashFiles('uv.lock') }}
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}

View File

@@ -13,8 +13,8 @@ jobs:
strategy:
fail-fast: true
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
group: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
python-version: ['3.10', '3.11', '3.12', '3.13']
group: [1, 2, 3, 4, 5, 6, 7, 8]
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -40,10 +40,6 @@ jobs:
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install the project
run: uv sync --all-groups --all-extras
@@ -53,7 +49,7 @@ jobs:
path: .test_durations_py*
key: test-durations-py${{ matrix.python-version }}
- name: Run tests (group ${{ matrix.group }} of 16)
- name: Run tests (group ${{ matrix.group }} of 8)
run: |
PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
DURATION_FILE="../../.test_durations_py${PYTHON_VERSION_SAFE}"
@@ -77,17 +73,17 @@ jobs:
cd lib/crewai && uv run pytest \
-vv \
--splits 16 \
--splits 8 \
--group ${{ matrix.group }} \
$DURATIONS_ARG \
--durations=10 \
--maxfail=3
- name: Run tool tests (group ${{ matrix.group }} of 16)
- name: Run tool tests (group ${{ matrix.group }} of 8)
run: |
cd lib/crewai-tools && uv run pytest \
-vv \
--splits 16 \
--splits 8 \
--group ${{ matrix.group }} \
--durations=10 \
--maxfail=3

View File

@@ -12,7 +12,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
python-version: ["3.10", "3.11", "3.12", "3.13"]
steps:
- name: Checkout code
@@ -39,10 +39,6 @@ jobs:
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install dependencies
run: uv sync --all-groups --all-extras

View File

@@ -16,11 +16,11 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
python-version: ['3.10', '3.11', '3.12', '3.13']
env:
OPENAI_API_KEY: fake-api-key
PYTHONUNBUFFERED: 1
steps:
- name: Checkout repository
uses: actions/checkout@v4
@@ -44,10 +44,6 @@ jobs:
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install the project
run: uv sync --all-groups --all-extras
@@ -72,4 +68,4 @@ jobs:
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}

View File

@@ -45,26 +45,26 @@ crew = Crew(
## Task Attributes
| Attribute | Parameters | Type | Description |
| :------------------------------------- | :---------------------- | :-------------------------- | :-------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- |
| **Description** | `description` | `str` | A clear, concise statement of what the task entails. |
| **Expected Output** | `expected_output` | `str` | A detailed description of what the task's completion looks like. |
| **Name** _(optional)_ | `name` | `Optional[str]` | A name identifier for the task. |
| **Agent** _(optional)_ | `agent` | `Optional[BaseAgent]` | The agent responsible for executing the task. |
| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | The tools/resources the agent is limited to use for this task. |
| **Context** _(optional)_ | `context` | `Optional[List["Task"]]` | Other tasks whose outputs will be used as context for this task. |
| **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]` | Whether the task should be executed asynchronously. Defaults to False. |
| **Human Input** _(optional)_ | `human_input` | `Optional[bool]` | Whether the task should have a human review the final answer of the agent. Defaults to False. |
| **Markdown** _(optional)_ | `markdown` | `Optional[bool]` | Whether the task should instruct the agent to return the final answer formatted in Markdown. Defaults to False. |
| **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. |
| **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. |
| **Create Directory** _(optional)_ | `create_directory` | `Optional[bool]` | Whether to create the directory for output_file if it doesn't exist. Defaults to True. |
| **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. |
| **Output Pydantic** _(optional)_ | `output_pydantic` | `Optional[Type[BaseModel]]` | A Pydantic model for task output. |
| **Callback** _(optional)_ | `callback` | `Optional[Any]` | Function/object to be executed after task completion. |
| **Guardrail** _(optional)_ | `guardrail` | `Optional[Callable]` | Function to validate task output before proceeding to next task. |
| **Guardrails** _(optional)_ | `guardrails` | `Optional[List[Callable] | List[str]]` | List of guardrails to validate task output before proceeding to next task. |
| **Guardrail Max Retries** _(optional)_ | `guardrail_max_retries` | `Optional[int]` | Maximum number of retries when guardrail validation fails. Defaults to 3. |
| Attribute | Parameters | Type | Description |
| :------------------------------------- | :---------------------- | :-------------------------------------- | :-------------------------------------------------------------------------------------------------------------- |
| **Description** | `description` | `str` | A clear, concise statement of what the task entails. |
| **Expected Output** | `expected_output` | `str` | A detailed description of what the task's completion looks like. |
| **Name** _(optional)_ | `name` | `Optional[str]` | A name identifier for the task. |
| **Agent** _(optional)_ | `agent` | `Optional[BaseAgent]` | The agent responsible for executing the task. |
| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | The tools/resources the agent is limited to use for this task. |
| **Context** _(optional)_ | `context` | `Optional[List["Task"]]` | Other tasks whose outputs will be used as context for this task. |
| **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]` | Whether the task should be executed asynchronously. Defaults to False. |
| **Human Input** _(optional)_ | `human_input` | `Optional[bool]` | Whether the task should have a human review the final answer of the agent. Defaults to False. |
| **Markdown** _(optional)_ | `markdown` | `Optional[bool]` | Whether the task should instruct the agent to return the final answer formatted in Markdown. Defaults to False. |
| **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. |
| **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. |
| **Create Directory** _(optional)_ | `create_directory` | `Optional[bool]` | Whether to create the directory for output_file if it doesn't exist. Defaults to True. |
| **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. |
| **Output Pydantic** _(optional)_ | `output_pydantic` | `Optional[Type[BaseModel]]` | A Pydantic model for task output. |
| **Callback** _(optional)_ | `callback` | `Optional[Any]` | Function/object to be executed after task completion. |
| **Guardrail** _(optional)_ | `guardrail` | `Optional[Callable]` | Function to validate task output before proceeding to next task. |
| **Guardrails** _(optional)_ | `guardrails` | `Optional[List[Callable] \| List[str]]` | List of guardrails to validate task output before proceeding to next task. |
| **Guardrail Max Retries** _(optional)_ | `guardrail_max_retries` | `Optional[int]` | Maximum number of retries when guardrail validation fails. Defaults to 3. |
<Note type="warning" title="Deprecated: max_retries">
The task attribute `max_retries` is deprecated and will be removed in v1.0.0.

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [
{ name = "Greyson LaLonde", email = "greyson@crewai.com" }
]
requires-python = ">=3.10,<3.15"
requires-python = ">=3.10, <3.14"
dependencies = [
"Pillow~=10.4.0",
"pypdf~=4.0.0",

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [
{ name = "João Moura", email = "joaomdmoura@gmail.com" },
]
requires-python = ">=3.10,<3.15"
requires-python = ">=3.10, <3.14"
dependencies = [
"lancedb~=0.5.4",
"pytube~=15.0.0",
@@ -118,7 +118,7 @@ rag = [
"lxml>=5.3.0,<5.4.0", # Pin to avoid etree import issues in 5.4.0
]
xml = [
"unstructured[local-inference, all-docs]>=0.17.2,<0.18.31"
"unstructured[local-inference, all-docs]>=0.17.2"
]
oxylabs = [
"oxylabs==2.0.0"

View File

@@ -33,11 +33,8 @@ def test_brave_tool_search(mock_get, brave_tool):
mock_get.return_value.json.return_value = mock_response
result = brave_tool.run(query="test")
data = json.loads(result)
assert isinstance(data, list)
assert len(data) >= 1
assert data[0]["title"] == "Test Title"
assert data[0]["url"] == "http://test.com"
assert "Test Title" in result
assert "http://test.com" in result
@patch("requests.get")

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [
{ name = "Joao Moura", email = "joao@crewai.com" }
]
requires-python = ">=3.10,<3.15"
requires-python = ">=3.10, <3.14"
dependencies = [
# Core Dependencies
"pydantic~=2.11.9",
@@ -14,7 +14,7 @@ dependencies = [
"instructor>=1.3.3",
# Text Processing
"pdfplumber~=0.11.4",
"regex~=2026.1.15",
"regex~=2024.9.11",
# Telemetry and Monitoring
"opentelemetry-api~=1.34.0",
"opentelemetry-sdk~=1.34.0",
@@ -36,7 +36,7 @@ dependencies = [
"json5~=0.10.0",
"portalocker~=2.7.0",
"pydantic-settings~=2.10.1",
"mcp~=1.26.0",
"mcp~=1.23.1",
"uv~=0.9.13",
"aiosqlite~=0.21.0",
]

View File

@@ -3,7 +3,7 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.15"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.9.3"
]

View File

@@ -3,7 +3,7 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.15"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.9.3"
]

View File

@@ -3,9 +3,9 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.15"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.9.3"
"crewai[tools]>=0.203.1"
]
[tool.crewai]

View File

@@ -187,7 +187,6 @@ class Crew(FlowTrackable, BaseModel):
_task_output_handler: TaskOutputStorageHandler = PrivateAttr(
default_factory=TaskOutputStorageHandler
)
_kickoff_event_id: str | None = PrivateAttr(default=None)
name: str | None = Field(default="crew")
cache: bool = Field(default=True)
@@ -760,11 +759,7 @@ class Crew(FlowTrackable, BaseModel):
except Exception as e:
crewai_event_bus.emit(
self,
CrewKickoffFailedEvent(
error=str(e),
crew_name=self.name,
started_event_id=self._kickoff_event_id,
),
CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
)
raise
finally:
@@ -954,11 +949,7 @@ class Crew(FlowTrackable, BaseModel):
except Exception as e:
crewai_event_bus.emit(
self,
CrewKickoffFailedEvent(
error=str(e),
crew_name=self.name,
started_event_id=self._kickoff_event_id,
),
CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
)
raise
finally:
@@ -1533,7 +1524,6 @@ class Crew(FlowTrackable, BaseModel):
crew_name=self.name,
output=final_task_output,
total_tokens=self.token_usage.total_tokens,
started_event_id=self._kickoff_event_id,
),
)

View File

@@ -265,9 +265,10 @@ def prepare_kickoff(
normalized = {}
normalized = before_callback(normalized)
started_event = CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized)
crew._kickoff_event_id = started_event.event_id
future = crewai_event_bus.emit(crew, started_event)
future = crewai_event_bus.emit(
crew,
CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized),
)
if future is not None:
try:
future.result()

View File

@@ -1696,99 +1696,6 @@ class OpenAICompletion(BaseLLM):
return content
def _finalize_streaming_response(
self,
full_response: str,
tool_calls: dict[int, dict[str, Any]],
usage_data: dict[str, int],
params: dict[str, Any],
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
) -> str | list[dict[str, Any]]:
"""Finalize a streaming response with usage tracking, tool call handling, and events.
Args:
full_response: The accumulated text response from the stream.
tool_calls: Accumulated tool calls from the stream, keyed by index.
usage_data: Token usage data from the stream.
params: The completion parameters containing messages.
available_functions: Available functions for tool calling.
from_task: Task that initiated the call.
from_agent: Agent that initiated the call.
Returns:
Tool calls list when tools were invoked without available_functions,
tool execution result when available_functions is provided,
or the text response string.
"""
self._track_token_usage_internal(usage_data)
if tool_calls and not available_functions:
tool_calls_list = [
{
"id": call_data["id"],
"type": "function",
"function": {
"name": call_data["name"],
"arguments": call_data["arguments"],
},
"index": call_data["index"],
}
for call_data in tool_calls.values()
]
self._emit_call_completed_event(
response=tool_calls_list,
call_type=LLMCallType.TOOL_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return tool_calls_list
if tool_calls and available_functions:
for call_data in tool_calls.values():
function_name = call_data["name"]
arguments = call_data["arguments"]
if not function_name or not arguments:
continue
if function_name not in available_functions:
logging.warning(
f"Function '{function_name}' not found in available functions"
)
continue
try:
function_args = json.loads(arguments)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse streamed tool arguments: {e}")
continue
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return full_response
def _handle_streaming_completion(
self,
params: dict[str, Any],
@@ -1796,7 +1703,7 @@ class OpenAICompletion(BaseLLM):
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | list[dict[str, Any]] | BaseModel:
) -> str | BaseModel:
"""Handle streaming chat completion."""
full_response = ""
tool_calls: dict[int, dict[str, Any]] = {}
@@ -1913,20 +1820,54 @@ class OpenAICompletion(BaseLLM):
response_id=response_id_stream,
)
result = self._finalize_streaming_response(
full_response=full_response,
tool_calls=tool_calls,
usage_data=usage_data,
params=params,
available_functions=available_functions,
self._track_token_usage_internal(usage_data)
if tool_calls and available_functions:
for call_data in tool_calls.values():
function_name = call_data["name"]
arguments = call_data["arguments"]
# Skip if function name is empty or arguments are empty
if not function_name or not arguments:
continue
# Check if function exists in available functions
if function_name not in available_functions:
logging.warning(
f"Function '{function_name}' not found in available functions"
)
continue
try:
function_args = json.loads(arguments)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse streamed tool arguments: {e}")
continue
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return self._invoke_after_llm_call_hooks(
params["messages"], full_response, from_agent
)
if isinstance(result, str):
return self._invoke_after_llm_call_hooks(
params["messages"], result, from_agent
)
return result
async def _ahandle_completion(
self,
@@ -2075,7 +2016,7 @@ class OpenAICompletion(BaseLLM):
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | list[dict[str, Any]] | BaseModel:
) -> str | BaseModel:
"""Handle async streaming chat completion."""
full_response = ""
tool_calls: dict[int, dict[str, Any]] = {}
@@ -2201,16 +2142,51 @@ class OpenAICompletion(BaseLLM):
response_id=response_id_stream,
)
return self._finalize_streaming_response(
full_response=full_response,
tool_calls=tool_calls,
usage_data=usage_data,
params=params,
available_functions=available_functions,
self._track_token_usage_internal(usage_data)
if tool_calls and available_functions:
for call_data in tool_calls.values():
function_name = call_data["name"]
arguments = call_data["arguments"]
if not function_name or not arguments:
continue
if function_name not in available_functions:
logging.warning(
f"Function '{function_name}' not found in available functions"
)
continue
try:
function_args = json.loads(arguments)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse streamed tool arguments: {e}")
continue
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return full_response
def supports_function_calling(self) -> bool:
"""Check if the model supports function calling."""
return not self.is_o1_model

View File

@@ -230,7 +230,7 @@ class TestDeployCommand(unittest.TestCase):
[project]
name = "test_project"
version = "0.1.0"
requires-python = ">=3.10,<3.15"
requires-python = ">=3.10,<3.14"
dependencies = ["crewai"]
""",
)
@@ -249,7 +249,7 @@ class TestDeployCommand(unittest.TestCase):
[project]
name = "test_project"
version = "0.1.0"
requires-python = ">=3.10,<3.15"
requires-python = ">=3.10,<3.14"
dependencies = ["crewai"]
""",
)

View File

@@ -1,7 +1,6 @@
import os
import sys
import types
from typing import Any
from unittest.mock import patch, MagicMock
import openai
import pytest
@@ -1579,167 +1578,3 @@ def test_openai_structured_output_preserves_json_with_stop_word_patterns():
assert "Action:" in result.action_taken
assert "Observation:" in result.observation_result
assert "Final Answer:" in result.final_answer
def test_openai_streaming_returns_tool_calls_without_available_functions():
"""Test that streaming returns tool calls list when available_functions is None.
This mirrors the non-streaming path where tool_calls are returned for
the executor to handle. Reproduces the bug where streaming with tool
calls would return empty text instead of tool_calls when
available_functions was not provided (as the crew executor does).
"""
llm = LLM(model="openai/gpt-4o-mini", stream=True)
mock_chunk_1 = MagicMock()
mock_chunk_1.choices = [MagicMock()]
mock_chunk_1.choices[0].delta = MagicMock()
mock_chunk_1.choices[0].delta.content = None
mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()]
mock_chunk_1.choices[0].delta.tool_calls[0].index = 0
mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123"
mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock()
mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator"
mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr'
mock_chunk_1.choices[0].finish_reason = None
mock_chunk_1.usage = None
mock_chunk_1.id = "chatcmpl-1"
mock_chunk_2 = MagicMock()
mock_chunk_2.choices = [MagicMock()]
mock_chunk_2.choices[0].delta = MagicMock()
mock_chunk_2.choices[0].delta.content = None
mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()]
mock_chunk_2.choices[0].delta.tool_calls[0].index = 0
mock_chunk_2.choices[0].delta.tool_calls[0].id = None
mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock()
mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None
mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}'
mock_chunk_2.choices[0].finish_reason = None
mock_chunk_2.usage = None
mock_chunk_2.id = "chatcmpl-1"
mock_chunk_3 = MagicMock()
mock_chunk_3.choices = [MagicMock()]
mock_chunk_3.choices[0].delta = MagicMock()
mock_chunk_3.choices[0].delta.content = None
mock_chunk_3.choices[0].delta.tool_calls = None
mock_chunk_3.choices[0].finish_reason = "tool_calls"
mock_chunk_3.usage = MagicMock()
mock_chunk_3.usage.prompt_tokens = 10
mock_chunk_3.usage.completion_tokens = 5
mock_chunk_3.id = "chatcmpl-1"
with patch.object(
llm.client.chat.completions, "create", return_value=iter([mock_chunk_1, mock_chunk_2, mock_chunk_3])
):
result = llm.call(
messages=[{"role": "user", "content": "Calculate 1+1"}],
tools=[{
"type": "function",
"function": {
"name": "calculator",
"description": "Calculate expression",
"parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
},
}],
available_functions=None,
)
assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
assert len(result) == 1
assert result[0]["function"]["name"] == "calculator"
assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
assert result[0]["id"] == "call_abc123"
assert result[0]["type"] == "function"
@pytest.mark.asyncio
async def test_openai_async_streaming_returns_tool_calls_without_available_functions():
"""Test that async streaming returns tool calls list when available_functions is None.
Same as the sync test but for the async path (_ahandle_streaming_completion).
"""
llm = LLM(model="openai/gpt-4o-mini", stream=True)
mock_chunk_1 = MagicMock()
mock_chunk_1.choices = [MagicMock()]
mock_chunk_1.choices[0].delta = MagicMock()
mock_chunk_1.choices[0].delta.content = None
mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()]
mock_chunk_1.choices[0].delta.tool_calls[0].index = 0
mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123"
mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock()
mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator"
mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr'
mock_chunk_1.choices[0].finish_reason = None
mock_chunk_1.usage = None
mock_chunk_1.id = "chatcmpl-1"
mock_chunk_2 = MagicMock()
mock_chunk_2.choices = [MagicMock()]
mock_chunk_2.choices[0].delta = MagicMock()
mock_chunk_2.choices[0].delta.content = None
mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()]
mock_chunk_2.choices[0].delta.tool_calls[0].index = 0
mock_chunk_2.choices[0].delta.tool_calls[0].id = None
mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock()
mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None
mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}'
mock_chunk_2.choices[0].finish_reason = None
mock_chunk_2.usage = None
mock_chunk_2.id = "chatcmpl-1"
mock_chunk_3 = MagicMock()
mock_chunk_3.choices = [MagicMock()]
mock_chunk_3.choices[0].delta = MagicMock()
mock_chunk_3.choices[0].delta.content = None
mock_chunk_3.choices[0].delta.tool_calls = None
mock_chunk_3.choices[0].finish_reason = "tool_calls"
mock_chunk_3.usage = MagicMock()
mock_chunk_3.usage.prompt_tokens = 10
mock_chunk_3.usage.completion_tokens = 5
mock_chunk_3.id = "chatcmpl-1"
class MockAsyncStream:
"""Async iterator that mimics OpenAI's async streaming response."""
def __init__(self, chunks: list[Any]) -> None:
self._chunks = chunks
self._index = 0
def __aiter__(self) -> "MockAsyncStream":
return self
async def __anext__(self) -> Any:
if self._index >= len(self._chunks):
raise StopAsyncIteration
chunk = self._chunks[self._index]
self._index += 1
return chunk
async def mock_create(**kwargs: Any) -> MockAsyncStream:
return MockAsyncStream([mock_chunk_1, mock_chunk_2, mock_chunk_3])
with patch.object(
llm.async_client.chat.completions, "create", side_effect=mock_create
):
result = await llm.acall(
messages=[{"role": "user", "content": "Calculate 1+1"}],
tools=[{
"type": "function",
"function": {
"name": "calculator",
"description": "Calculate expression",
"parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
},
}],
available_functions=None,
)
assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
assert len(result) == 1
assert result[0]["function"]["name"] == "calculator"
assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
assert result[0]["id"] == "call_abc123"
assert result[0]["type"] == "function"

View File

@@ -0,0 +1,155 @@
"""Tests to validate markdown table formatting in documentation files.
These tests ensure that markdown tables in the documentation are well-formed
and render correctly. Specifically, they check that:
- All rows have the same number of columns as the header
- Pipe characters inside table cells are properly escaped
- Separator rows match the header column count
"""
from __future__ import annotations
import re
from pathlib import Path
import pytest
DOCS_DIR = Path(__file__).resolve().parents[3] / "docs"
DOCS_TABLE_FILES = [
"en/concepts/tasks.mdx",
"pt-BR/concepts/tasks.mdx",
"ko/concepts/tasks.mdx",
]
def _split_table_row(line: str) -> list[str]:
"""Split a markdown table row on unescaped pipe characters.
Escaped pipes (``\\|``) are preserved as literal ``|`` inside cells.
"""
cells = re.split(r"(?<!\\)\|", line)
return [cell.replace("\\|", "|").strip() for cell in cells]
def _parse_markdown_tables(content: str) -> list[tuple[int, list[list[str]]]]:
"""Parse all markdown tables from content.
Returns a list of (start_line_number, table_rows) tuples.
Each table_rows is a list of rows, where each row is a list of cell values.
"""
lines = content.split("\n")
tables: list[tuple[int, list[list[str]]]] = []
current_table: list[list[str]] = []
table_start = 0
for i, line in enumerate(lines):
stripped = line.strip()
if stripped.startswith("|") and stripped.endswith("|"):
if not current_table:
table_start = i + 1
cells = _split_table_row(stripped)
cells = cells[1:-1]
current_table.append(cells)
else:
if current_table:
tables.append((table_start, current_table))
current_table = []
if current_table:
tables.append((table_start, current_table))
return tables
def _is_separator_row(cells: list[str]) -> bool:
"""Check if a row is a table separator (e.g., | :--- | :--- |)."""
return all(re.match(r"^:?-+:?$", cell.strip()) for cell in cells if cell.strip())
@pytest.mark.parametrize("doc_path", DOCS_TABLE_FILES)
def test_markdown_tables_have_consistent_columns(doc_path: str) -> None:
"""Verify all rows in each markdown table have the same number of columns."""
full_path = DOCS_DIR / doc_path
if not full_path.exists():
pytest.skip(f"Doc file not found: {full_path}")
content = full_path.read_text(encoding="utf-8")
tables = _parse_markdown_tables(content)
for table_start, rows in tables:
if len(rows) < 2:
continue
header_col_count = len(rows[0])
for row_idx, row in enumerate(rows[1:], start=1):
assert len(row) == header_col_count, (
f"Table at line {table_start} in {doc_path}: "
f"row {row_idx + 1} has {len(row)} columns, expected {header_col_count}. "
f"Row content: {'|'.join(row)}"
)
@pytest.mark.parametrize("doc_path", DOCS_TABLE_FILES)
def test_task_attributes_table_has_no_unescaped_pipes_in_cells(doc_path: str) -> None:
"""Verify the Task Attributes table doesn't have unescaped pipe chars in cells.
The '|' character is the column delimiter in markdown tables. If a type
annotation like `List[Callable] | List[str]` contains an unescaped pipe,
it will be interpreted as a column separator and break the table layout.
"""
full_path = DOCS_DIR / doc_path
if not full_path.exists():
pytest.skip(f"Doc file not found: {full_path}")
content = full_path.read_text(encoding="utf-8")
lines = content.split("\n")
in_task_attrs = False
for i, line in enumerate(lines):
if "## Task Attributes" in line or "## Atributos da Tarefa" in line:
in_task_attrs = True
continue
if in_task_attrs and line.startswith("##"):
break
if in_task_attrs and line.strip().startswith("|") and line.strip().endswith("|"):
stripped = line.strip()
cells = stripped.split("|")
cells = cells[1:-1]
if _is_separator_row(cells):
continue
for cell_idx, cell in enumerate(cells):
unescaped_pipes = re.findall(r"(?<!\\)\|", cell)
assert not unescaped_pipes, (
f"Line {i + 1} in {doc_path}, cell {cell_idx + 1}: "
f"found unescaped pipe character in cell content: '{cell.strip()}'. "
f"Use '\\|' to escape pipe characters inside table cells."
)
@pytest.mark.parametrize("doc_path", DOCS_TABLE_FILES)
def test_task_attributes_table_separator_matches_header(doc_path: str) -> None:
"""Verify the separator row has the same number of columns as the header."""
full_path = DOCS_DIR / doc_path
if not full_path.exists():
pytest.skip(f"Doc file not found: {full_path}")
content = full_path.read_text(encoding="utf-8")
tables = _parse_markdown_tables(content)
for table_start, rows in tables:
if len(rows) < 2:
continue
header = rows[0]
separator = rows[1]
if _is_separator_row(separator):
assert len(separator) == len(header), (
f"Table at line {table_start} in {doc_path}: "
f"separator has {len(separator)} columns but header has {len(header)}. "
f"This usually means the header or separator row has extra '|' delimiters."
)
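
As a standalone check of the other half of the fix (the separator row that had one more dash column than the header), the sketch below inlines two helpers verbatim from the test file above; the header and separator strings are shortened stand-ins for the real Task Attributes rows.

```python
import re

# Inline copies of helpers from the doc-table tests above, so this runs on its own.
def _split_table_row(line: str) -> list[str]:
    cells = re.split(r"(?<!\\)\|", line)
    return [cell.replace("\\|", "|").strip() for cell in cells]

def _is_separator_row(cells: list[str]) -> bool:
    return all(re.match(r"^:?-+:?$", cell.strip()) for cell in cells if cell.strip())

def cell_count(row: str) -> int:
    return len(_split_table_row(row)[1:-1])  # drop the empty cells outside the outer pipes

header  = "| Attribute | Parameters | Type | Description |"
old_sep = "| :--- | :--- | :--- | :--- | --- |"  # five dash groups, as in the broken table
new_sep = "| :--- | :--- | :--- | :--- |"        # four, matching the header

assert _is_separator_row(_split_table_row(old_sep)[1:-1])
assert cell_count(header) == 4
assert cell_count(old_sep) == 5  # the mismatch that test_task_attributes_table_separator_matches_header now catches
assert cell_count(new_sep) == 4  # what the fixed table uses
```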

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [
{ name = "Greyson R. LaLonde", email = "greyson@crewai.com" },
]
requires-python = ">=3.10,<3.15"
requires-python = ">=3.10, <3.14"
classifiers = ["Private :: Do Not Upload"]
private = true
dependencies = [

View File

@@ -1,7 +1,7 @@
name = "crewai-workspace"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<3.15"
requires-python = ">=3.10,<3.14"
authors = [
{ name = "Joao Moura", email = "joao@crewai.com" }
]
@@ -143,11 +143,6 @@ python_classes = "Test*"
python_functions = "test_*"
[tool.uv]
constraint-dependencies = [
"onnxruntime<1.24; python_version < '3.11'",
]
[tool.uv.workspace]
members = [
"lib/crewai",

uv.lock (generated): 4637 changes. File diff suppressed because it is too large.