Compare commits

..

11 Commits

Author SHA1 Message Date
Greyson LaLonde
2652dfecb3 fix: constrain unstructured to ensure python compat 2026-02-11 12:58:55 -05:00
Greyson LaLonde
a71e5db75f feat: update lib bounds; update actions 2026-02-11 11:11:45 -05:00
Greyson LaLonde
3963bb3986 Merge branch 'gl/feat/allow-python314' of https://github.com/crewAIInc/crewAI into gl/feat/allow-python314 2026-02-11 11:05:58 -05:00
Greyson LaLonde
82bf8965b0 chore: update actions to allow 3.14 2026-02-11 11:05:48 -05:00
Greyson LaLonde
c4c35502ba Merge branch 'main' into gl/feat/allow-python314 2026-02-11 10:56:37 -05:00
Greyson LaLonde
13478b87ec feat: extend requires-python upper bound to include 3.14.x 2026-02-11 10:52:58 -05:00
Greyson LaLonde
3a22e80764 fix: ensure openai tool call stream is finalized
Some checks failed
CodeQL Advanced / Analyze (actions) (push) Has been cancelled
CodeQL Advanced / Analyze (python) (push) Has been cancelled
Notify Downstream / notify-downstream (push) Has been cancelled
2026-02-11 10:02:31 -05:00
Greyson LaLonde
9b585a934d fix: pass started_event_id to crew 2026-02-11 09:30:07 -05:00
Rip&Tear
46e1b02154 chore: fix codeql coverage and action version (#4454) 2026-02-11 18:20:07 +08:00
Rip&Tear
87675b49fd test: avoid URL substring assertion in brave search test (#4453)
Some checks failed
CodeQL Advanced / Analyze (actions) (push) Has been cancelled
CodeQL Advanced / Analyze (python) (push) Has been cancelled
Notify Downstream / notify-downstream (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
2026-02-11 14:32:10 +08:00
Lucas Gomide
a3bee66be8 Address OpenSSL CVE-2025-15467 vulnerability (#4426)
Some checks failed
CodeQL Advanced / Analyze (actions) (push) Has been cancelled
CodeQL Advanced / Analyze (python) (push) Has been cancelled
Notify Downstream / notify-downstream (push) Has been cancelled
Build uv cache / build-cache (3.10) (push) Has been cancelled
Build uv cache / build-cache (3.11) (push) Has been cancelled
Build uv cache / build-cache (3.12) (push) Has been cancelled
Build uv cache / build-cache (3.13) (push) Has been cancelled
* fix(security): bump regex from 2024.9.11 to 2026.1.15

Address security vulnerability flagged in regex==2024.9.11

* bump mcp from 1.23.1 to 1.26.0

Address security vulnerability flagged in mcp==1.16.0 (resolved to 1.23.3)
2026-02-10 09:39:35 -08:00
25 changed files with 3189 additions and 2124 deletions

View File

@@ -65,6 +65,8 @@ body:
- '3.10' - '3.10'
- '3.11' - '3.11'
- '3.12' - '3.12'
- '3.13'
- '3.14'
validations: validations:
required: true required: true
- type: input - type: input

View File

@@ -14,13 +14,18 @@ paths-ignore:
- "lib/crewai/src/crewai/experimental/a2a/**" - "lib/crewai/src/crewai/experimental/a2a/**"
paths: paths:
# Include GitHub Actions workflows/composite actions for CodeQL actions analysis
- ".github/workflows/**"
- ".github/actions/**"
# Include all Python source code from workspace packages # Include all Python source code from workspace packages
- "lib/crewai/src/**" - "lib/crewai/src/**"
- "lib/crewai-tools/src/**" - "lib/crewai-tools/src/**"
- "lib/crewai-files/src/**"
- "lib/devtools/src/**" - "lib/devtools/src/**"
# Include tests (but exclude cassettes via paths-ignore) # Include tests (but exclude cassettes via paths-ignore)
- "lib/crewai/tests/**" - "lib/crewai/tests/**"
- "lib/crewai-tools/tests/**" - "lib/crewai-tools/tests/**"
- "lib/crewai-files/tests/**"
- "lib/devtools/tests/**" - "lib/devtools/tests/**"
# Configure specific queries or packs if needed # Configure specific queries or packs if needed

View File

@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
python-version: ["3.10", "3.11", "3.12", "3.13"] python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
steps: steps:
- name: Checkout repository - name: Checkout repository
@@ -32,6 +32,10 @@ jobs:
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
enable-cache: false enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install dependencies and populate cache - name: Install dependencies and populate cache
run: | run: |
echo "Building global UV cache for Python ${{ matrix.python-version }}..." echo "Building global UV cache for Python ${{ matrix.python-version }}..."

View File

@@ -69,7 +69,7 @@ jobs:
# Initializes the CodeQL tools for scanning. # Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL - name: Initialize CodeQL
uses: github/codeql-action/init@v3 uses: github/codeql-action/init@v4
with: with:
languages: ${{ matrix.language }} languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }} build-mode: ${{ matrix.build-mode }}
@@ -98,6 +98,6 @@ jobs:
exit 1 exit 1
- name: Perform CodeQL Analysis - name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3 uses: github/codeql-action/analyze@v4
with: with:
category: "/language:${{matrix.language}}" category: "/language:${{matrix.language}}"

View File

@@ -26,15 +26,15 @@ jobs:
~/.cache/uv ~/.cache/uv
~/.local/share/uv ~/.local/share/uv
.venv .venv
key: uv-main-py3.11-${{ hashFiles('uv.lock') }} key: uv-main-py3.12-${{ hashFiles('uv.lock') }}
restore-keys: | restore-keys: |
uv-main-py3.11- uv-main-py3.12-
- name: Install uv - name: Install uv
uses: astral-sh/setup-uv@v6 uses: astral-sh/setup-uv@v6
with: with:
version: "0.8.4" version: "0.8.4"
python-version: "3.11" python-version: "3.12"
enable-cache: false enable-cache: false
- name: Install dependencies - name: Install dependencies
@@ -66,4 +66,4 @@ jobs:
~/.cache/uv ~/.cache/uv
~/.local/share/uv ~/.local/share/uv
.venv .venv
key: uv-main-py3.11-${{ hashFiles('uv.lock') }} key: uv-main-py3.12-${{ hashFiles('uv.lock') }}

View File

@@ -13,8 +13,8 @@ jobs:
strategy: strategy:
fail-fast: true fail-fast: true
matrix: matrix:
python-version: ['3.10', '3.11', '3.12', '3.13'] python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
group: [1, 2, 3, 4, 5, 6, 7, 8] group: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -40,6 +40,10 @@ jobs:
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
enable-cache: false enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install the project - name: Install the project
run: uv sync --all-groups --all-extras run: uv sync --all-groups --all-extras
@@ -49,7 +53,7 @@ jobs:
path: .test_durations_py* path: .test_durations_py*
key: test-durations-py${{ matrix.python-version }} key: test-durations-py${{ matrix.python-version }}
- name: Run tests (group ${{ matrix.group }} of 8) - name: Run tests (group ${{ matrix.group }} of 16)
run: | run: |
PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_') PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
DURATION_FILE="../../.test_durations_py${PYTHON_VERSION_SAFE}" DURATION_FILE="../../.test_durations_py${PYTHON_VERSION_SAFE}"
@@ -73,17 +77,17 @@ jobs:
cd lib/crewai && uv run pytest \ cd lib/crewai && uv run pytest \
-vv \ -vv \
--splits 8 \ --splits 16 \
--group ${{ matrix.group }} \ --group ${{ matrix.group }} \
$DURATIONS_ARG \ $DURATIONS_ARG \
--durations=10 \ --durations=10 \
--maxfail=3 --maxfail=3
- name: Run tool tests (group ${{ matrix.group }} of 8) - name: Run tool tests (group ${{ matrix.group }} of 16)
run: | run: |
cd lib/crewai-tools && uv run pytest \ cd lib/crewai-tools && uv run pytest \
-vv \ -vv \
--splits 8 \ --splits 16 \
--group ${{ matrix.group }} \ --group ${{ matrix.group }} \
--durations=10 \ --durations=10 \
--maxfail=3 --maxfail=3

View File

@@ -12,7 +12,7 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
python-version: ["3.10", "3.11", "3.12", "3.13"] python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
steps: steps:
- name: Checkout code - name: Checkout code
@@ -39,6 +39,10 @@ jobs:
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
enable-cache: false enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install dependencies - name: Install dependencies
run: uv sync --all-groups --all-extras run: uv sync --all-groups --all-extras

View File

@@ -16,11 +16,11 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
python-version: ['3.10', '3.11', '3.12', '3.13'] python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
env: env:
OPENAI_API_KEY: fake-api-key OPENAI_API_KEY: fake-api-key
PYTHONUNBUFFERED: 1 PYTHONUNBUFFERED: 1
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -44,6 +44,10 @@ jobs:
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
enable-cache: false enable-cache: false
- name: Install system build dependencies
if: matrix.python-version == '3.14'
run: sudo apt-get update && sudo apt-get install -y libxml2-dev libxslt-dev libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev libheif-dev
- name: Install the project - name: Install the project
run: uv sync --all-groups --all-extras run: uv sync --all-groups --all-extras
@@ -68,4 +72,4 @@ jobs:
~/.cache/uv ~/.cache/uv
~/.local/share/uv ~/.local/share/uv
.venv .venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }} key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}

View File

@@ -45,26 +45,26 @@ crew = Crew(
## Task Attributes ## Task Attributes
| Attribute | Parameters | Type | Description | | Attribute | Parameters | Type | Description |
| :------------------------------------- | :---------------------- | :-------------------------------------- | :-------------------------------------------------------------------------------------------------------------- | | :------------------------------------- | :---------------------- | :-------------------------- | :-------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- |
| **Description** | `description` | `str` | A clear, concise statement of what the task entails. | | **Description** | `description` | `str` | A clear, concise statement of what the task entails. |
| **Expected Output** | `expected_output` | `str` | A detailed description of what the task's completion looks like. | | **Expected Output** | `expected_output` | `str` | A detailed description of what the task's completion looks like. |
| **Name** _(optional)_ | `name` | `Optional[str]` | A name identifier for the task. | | **Name** _(optional)_ | `name` | `Optional[str]` | A name identifier for the task. |
| **Agent** _(optional)_ | `agent` | `Optional[BaseAgent]` | The agent responsible for executing the task. | | **Agent** _(optional)_ | `agent` | `Optional[BaseAgent]` | The agent responsible for executing the task. |
| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | The tools/resources the agent is limited to use for this task. | | **Tools** _(optional)_ | `tools` | `List[BaseTool]` | The tools/resources the agent is limited to use for this task. |
| **Context** _(optional)_ | `context` | `Optional[List["Task"]]` | Other tasks whose outputs will be used as context for this task. | | **Context** _(optional)_ | `context` | `Optional[List["Task"]]` | Other tasks whose outputs will be used as context for this task. |
| **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]` | Whether the task should be executed asynchronously. Defaults to False. | | **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]` | Whether the task should be executed asynchronously. Defaults to False. |
| **Human Input** _(optional)_ | `human_input` | `Optional[bool]` | Whether the task should have a human review the final answer of the agent. Defaults to False. | | **Human Input** _(optional)_ | `human_input` | `Optional[bool]` | Whether the task should have a human review the final answer of the agent. Defaults to False. |
| **Markdown** _(optional)_ | `markdown` | `Optional[bool]` | Whether the task should instruct the agent to return the final answer formatted in Markdown. Defaults to False. | | **Markdown** _(optional)_ | `markdown` | `Optional[bool]` | Whether the task should instruct the agent to return the final answer formatted in Markdown. Defaults to False. |
| **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. | | **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. |
| **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. | | **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. |
| **Create Directory** _(optional)_ | `create_directory` | `Optional[bool]` | Whether to create the directory for output_file if it doesn't exist. Defaults to True. | | **Create Directory** _(optional)_ | `create_directory` | `Optional[bool]` | Whether to create the directory for output_file if it doesn't exist. Defaults to True. |
| **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. | | **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. |
| **Output Pydantic** _(optional)_ | `output_pydantic` | `Optional[Type[BaseModel]]` | A Pydantic model for task output. | | **Output Pydantic** _(optional)_ | `output_pydantic` | `Optional[Type[BaseModel]]` | A Pydantic model for task output. |
| **Callback** _(optional)_ | `callback` | `Optional[Any]` | Function/object to be executed after task completion. | | **Callback** _(optional)_ | `callback` | `Optional[Any]` | Function/object to be executed after task completion. |
| **Guardrail** _(optional)_ | `guardrail` | `Optional[Callable]` | Function to validate task output before proceeding to next task. | | **Guardrail** _(optional)_ | `guardrail` | `Optional[Callable]` | Function to validate task output before proceeding to next task. |
| **Guardrails** _(optional)_ | `guardrails` | `Optional[List[Callable] \| List[str]]` | List of guardrails to validate task output before proceeding to next task. | | **Guardrails** _(optional)_ | `guardrails` | `Optional[List[Callable] | List[str]]` | List of guardrails to validate task output before proceeding to next task. |
| **Guardrail Max Retries** _(optional)_ | `guardrail_max_retries` | `Optional[int]` | Maximum number of retries when guardrail validation fails. Defaults to 3. | | **Guardrail Max Retries** _(optional)_ | `guardrail_max_retries` | `Optional[int]` | Maximum number of retries when guardrail validation fails. Defaults to 3. |
<Note type="warning" title="Deprecated: max_retries"> <Note type="warning" title="Deprecated: max_retries">
The task attribute `max_retries` is deprecated and will be removed in v1.0.0. The task attribute `max_retries` is deprecated and will be removed in v1.0.0.

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [ authors = [
{ name = "Greyson LaLonde", email = "greyson@crewai.com" } { name = "Greyson LaLonde", email = "greyson@crewai.com" }
] ]
requires-python = ">=3.10, <3.14" requires-python = ">=3.10,<3.15"
dependencies = [ dependencies = [
"Pillow~=10.4.0", "Pillow~=10.4.0",
"pypdf~=4.0.0", "pypdf~=4.0.0",

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [ authors = [
{ name = "João Moura", email = "joaomdmoura@gmail.com" }, { name = "João Moura", email = "joaomdmoura@gmail.com" },
] ]
requires-python = ">=3.10, <3.14" requires-python = ">=3.10,<3.15"
dependencies = [ dependencies = [
"lancedb~=0.5.4", "lancedb~=0.5.4",
"pytube~=15.0.0", "pytube~=15.0.0",
@@ -118,7 +118,7 @@ rag = [
"lxml>=5.3.0,<5.4.0", # Pin to avoid etree import issues in 5.4.0 "lxml>=5.3.0,<5.4.0", # Pin to avoid etree import issues in 5.4.0
] ]
xml = [ xml = [
"unstructured[local-inference, all-docs]>=0.17.2" "unstructured[local-inference, all-docs]>=0.17.2,<0.18.31"
] ]
oxylabs = [ oxylabs = [
"oxylabs==2.0.0" "oxylabs==2.0.0"

View File

@@ -33,8 +33,11 @@ def test_brave_tool_search(mock_get, brave_tool):
mock_get.return_value.json.return_value = mock_response mock_get.return_value.json.return_value = mock_response
result = brave_tool.run(query="test") result = brave_tool.run(query="test")
assert "Test Title" in result data = json.loads(result)
assert "http://test.com" in result assert isinstance(data, list)
assert len(data) >= 1
assert data[0]["title"] == "Test Title"
assert data[0]["url"] == "http://test.com"
@patch("requests.get") @patch("requests.get")

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [ authors = [
{ name = "Joao Moura", email = "joao@crewai.com" } { name = "Joao Moura", email = "joao@crewai.com" }
] ]
requires-python = ">=3.10, <3.14" requires-python = ">=3.10,<3.15"
dependencies = [ dependencies = [
# Core Dependencies # Core Dependencies
"pydantic~=2.11.9", "pydantic~=2.11.9",
@@ -14,7 +14,7 @@ dependencies = [
"instructor>=1.3.3", "instructor>=1.3.3",
# Text Processing # Text Processing
"pdfplumber~=0.11.4", "pdfplumber~=0.11.4",
"regex~=2024.9.11", "regex~=2026.1.15",
# Telemetry and Monitoring # Telemetry and Monitoring
"opentelemetry-api~=1.34.0", "opentelemetry-api~=1.34.0",
"opentelemetry-sdk~=1.34.0", "opentelemetry-sdk~=1.34.0",
@@ -36,7 +36,7 @@ dependencies = [
"json5~=0.10.0", "json5~=0.10.0",
"portalocker~=2.7.0", "portalocker~=2.7.0",
"pydantic-settings~=2.10.1", "pydantic-settings~=2.10.1",
"mcp~=1.23.1", "mcp~=1.26.0",
"uv~=0.9.13", "uv~=0.9.13",
"aiosqlite~=0.21.0", "aiosqlite~=0.21.0",
] ]

View File

@@ -3,7 +3,7 @@ name = "{{folder_name}}"
version = "0.1.0" version = "0.1.0"
description = "{{name}} using crewAI" description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }] authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14" requires-python = ">=3.10,<3.15"
dependencies = [ dependencies = [
"crewai[tools]==1.9.3" "crewai[tools]==1.9.3"
] ]

View File

@@ -3,7 +3,7 @@ name = "{{folder_name}}"
version = "0.1.0" version = "0.1.0"
description = "{{name}} using crewAI" description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }] authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14" requires-python = ">=3.10,<3.15"
dependencies = [ dependencies = [
"crewai[tools]==1.9.3" "crewai[tools]==1.9.3"
] ]

View File

@@ -3,9 +3,9 @@ name = "{{folder_name}}"
version = "0.1.0" version = "0.1.0"
description = "Power up your crews with {{folder_name}}" description = "Power up your crews with {{folder_name}}"
readme = "README.md" readme = "README.md"
requires-python = ">=3.10,<3.14" requires-python = ">=3.10,<3.15"
dependencies = [ dependencies = [
"crewai[tools]>=0.203.1" "crewai[tools]==1.9.3"
] ]
[tool.crewai] [tool.crewai]

View File

@@ -187,6 +187,7 @@ class Crew(FlowTrackable, BaseModel):
_task_output_handler: TaskOutputStorageHandler = PrivateAttr( _task_output_handler: TaskOutputStorageHandler = PrivateAttr(
default_factory=TaskOutputStorageHandler default_factory=TaskOutputStorageHandler
) )
_kickoff_event_id: str | None = PrivateAttr(default=None)
name: str | None = Field(default="crew") name: str | None = Field(default="crew")
cache: bool = Field(default=True) cache: bool = Field(default=True)
@@ -759,7 +760,11 @@ class Crew(FlowTrackable, BaseModel):
except Exception as e: except Exception as e:
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,
CrewKickoffFailedEvent(error=str(e), crew_name=self.name), CrewKickoffFailedEvent(
error=str(e),
crew_name=self.name,
started_event_id=self._kickoff_event_id,
),
) )
raise raise
finally: finally:
@@ -949,7 +954,11 @@ class Crew(FlowTrackable, BaseModel):
except Exception as e: except Exception as e:
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,
CrewKickoffFailedEvent(error=str(e), crew_name=self.name), CrewKickoffFailedEvent(
error=str(e),
crew_name=self.name,
started_event_id=self._kickoff_event_id,
),
) )
raise raise
finally: finally:
@@ -1524,6 +1533,7 @@ class Crew(FlowTrackable, BaseModel):
crew_name=self.name, crew_name=self.name,
output=final_task_output, output=final_task_output,
total_tokens=self.token_usage.total_tokens, total_tokens=self.token_usage.total_tokens,
started_event_id=self._kickoff_event_id,
), ),
) )

View File

@@ -265,10 +265,9 @@ def prepare_kickoff(
normalized = {} normalized = {}
normalized = before_callback(normalized) normalized = before_callback(normalized)
future = crewai_event_bus.emit( started_event = CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized)
crew, crew._kickoff_event_id = started_event.event_id
CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized), future = crewai_event_bus.emit(crew, started_event)
)
if future is not None: if future is not None:
try: try:
future.result() future.result()

View File

@@ -1696,6 +1696,99 @@ class OpenAICompletion(BaseLLM):
return content return content
def _finalize_streaming_response(
self,
full_response: str,
tool_calls: dict[int, dict[str, Any]],
usage_data: dict[str, int],
params: dict[str, Any],
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
) -> str | list[dict[str, Any]]:
"""Finalize a streaming response with usage tracking, tool call handling, and events.
Args:
full_response: The accumulated text response from the stream.
tool_calls: Accumulated tool calls from the stream, keyed by index.
usage_data: Token usage data from the stream.
params: The completion parameters containing messages.
available_functions: Available functions for tool calling.
from_task: Task that initiated the call.
from_agent: Agent that initiated the call.
Returns:
Tool calls list when tools were invoked without available_functions,
tool execution result when available_functions is provided,
or the text response string.
"""
self._track_token_usage_internal(usage_data)
if tool_calls and not available_functions:
tool_calls_list = [
{
"id": call_data["id"],
"type": "function",
"function": {
"name": call_data["name"],
"arguments": call_data["arguments"],
},
"index": call_data["index"],
}
for call_data in tool_calls.values()
]
self._emit_call_completed_event(
response=tool_calls_list,
call_type=LLMCallType.TOOL_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return tool_calls_list
if tool_calls and available_functions:
for call_data in tool_calls.values():
function_name = call_data["name"]
arguments = call_data["arguments"]
if not function_name or not arguments:
continue
if function_name not in available_functions:
logging.warning(
f"Function '{function_name}' not found in available functions"
)
continue
try:
function_args = json.loads(arguments)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse streamed tool arguments: {e}")
continue
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return full_response
def _handle_streaming_completion( def _handle_streaming_completion(
self, self,
params: dict[str, Any], params: dict[str, Any],
@@ -1703,7 +1796,7 @@ class OpenAICompletion(BaseLLM):
from_task: Any | None = None, from_task: Any | None = None,
from_agent: Any | None = None, from_agent: Any | None = None,
response_model: type[BaseModel] | None = None, response_model: type[BaseModel] | None = None,
) -> str | BaseModel: ) -> str | list[dict[str, Any]] | BaseModel:
"""Handle streaming chat completion.""" """Handle streaming chat completion."""
full_response = "" full_response = ""
tool_calls: dict[int, dict[str, Any]] = {} tool_calls: dict[int, dict[str, Any]] = {}
@@ -1820,54 +1913,20 @@ class OpenAICompletion(BaseLLM):
response_id=response_id_stream, response_id=response_id_stream,
) )
self._track_token_usage_internal(usage_data) result = self._finalize_streaming_response(
full_response=full_response,
if tool_calls and available_functions: tool_calls=tool_calls,
for call_data in tool_calls.values(): usage_data=usage_data,
function_name = call_data["name"] params=params,
arguments = call_data["arguments"] available_functions=available_functions,
# Skip if function name is empty or arguments are empty
if not function_name or not arguments:
continue
# Check if function exists in available functions
if function_name not in available_functions:
logging.warning(
f"Function '{function_name}' not found in available functions"
)
continue
try:
function_args = json.loads(arguments)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse streamed tool arguments: {e}")
continue
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task, from_task=from_task,
from_agent=from_agent, from_agent=from_agent,
messages=params["messages"],
)
return self._invoke_after_llm_call_hooks(
params["messages"], full_response, from_agent
) )
if isinstance(result, str):
return self._invoke_after_llm_call_hooks(
params["messages"], result, from_agent
)
return result
async def _ahandle_completion( async def _ahandle_completion(
self, self,
@@ -2016,7 +2075,7 @@ class OpenAICompletion(BaseLLM):
from_task: Any | None = None, from_task: Any | None = None,
from_agent: Any | None = None, from_agent: Any | None = None,
response_model: type[BaseModel] | None = None, response_model: type[BaseModel] | None = None,
) -> str | BaseModel: ) -> str | list[dict[str, Any]] | BaseModel:
"""Handle async streaming chat completion.""" """Handle async streaming chat completion."""
full_response = "" full_response = ""
tool_calls: dict[int, dict[str, Any]] = {} tool_calls: dict[int, dict[str, Any]] = {}
@@ -2142,51 +2201,16 @@ class OpenAICompletion(BaseLLM):
response_id=response_id_stream, response_id=response_id_stream,
) )
self._track_token_usage_internal(usage_data) return self._finalize_streaming_response(
full_response=full_response,
if tool_calls and available_functions: tool_calls=tool_calls,
for call_data in tool_calls.values(): usage_data=usage_data,
function_name = call_data["name"] params=params,
arguments = call_data["arguments"] available_functions=available_functions,
if not function_name or not arguments:
continue
if function_name not in available_functions:
logging.warning(
f"Function '{function_name}' not found in available functions"
)
continue
try:
function_args = json.loads(arguments)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse streamed tool arguments: {e}")
continue
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task, from_task=from_task,
from_agent=from_agent, from_agent=from_agent,
messages=params["messages"],
) )
return full_response
def supports_function_calling(self) -> bool: def supports_function_calling(self) -> bool:
"""Check if the model supports function calling.""" """Check if the model supports function calling."""
return not self.is_o1_model return not self.is_o1_model

View File

@@ -230,7 +230,7 @@ class TestDeployCommand(unittest.TestCase):
[project] [project]
name = "test_project" name = "test_project"
version = "0.1.0" version = "0.1.0"
requires-python = ">=3.10,<3.14" requires-python = ">=3.10,<3.15"
dependencies = ["crewai"] dependencies = ["crewai"]
""", """,
) )
@@ -249,7 +249,7 @@ class TestDeployCommand(unittest.TestCase):
[project] [project]
name = "test_project" name = "test_project"
version = "0.1.0" version = "0.1.0"
requires-python = ">=3.10,<3.14" requires-python = ">=3.10,<3.15"
dependencies = ["crewai"] dependencies = ["crewai"]
""", """,
) )

View File

@@ -1,6 +1,7 @@
import os import os
import sys import sys
import types import types
from typing import Any
from unittest.mock import patch, MagicMock from unittest.mock import patch, MagicMock
import openai import openai
import pytest import pytest
@@ -1578,3 +1579,167 @@ def test_openai_structured_output_preserves_json_with_stop_word_patterns():
assert "Action:" in result.action_taken assert "Action:" in result.action_taken
assert "Observation:" in result.observation_result assert "Observation:" in result.observation_result
assert "Final Answer:" in result.final_answer assert "Final Answer:" in result.final_answer
def test_openai_streaming_returns_tool_calls_without_available_functions():
"""Test that streaming returns tool calls list when available_functions is None.
This mirrors the non-streaming path where tool_calls are returned for
the executor to handle. Reproduces the bug where streaming with tool
calls would return empty text instead of tool_calls when
available_functions was not provided (as the crew executor does).
"""
llm = LLM(model="openai/gpt-4o-mini", stream=True)
mock_chunk_1 = MagicMock()
mock_chunk_1.choices = [MagicMock()]
mock_chunk_1.choices[0].delta = MagicMock()
mock_chunk_1.choices[0].delta.content = None
mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()]
mock_chunk_1.choices[0].delta.tool_calls[0].index = 0
mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123"
mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock()
mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator"
mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr'
mock_chunk_1.choices[0].finish_reason = None
mock_chunk_1.usage = None
mock_chunk_1.id = "chatcmpl-1"
mock_chunk_2 = MagicMock()
mock_chunk_2.choices = [MagicMock()]
mock_chunk_2.choices[0].delta = MagicMock()
mock_chunk_2.choices[0].delta.content = None
mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()]
mock_chunk_2.choices[0].delta.tool_calls[0].index = 0
mock_chunk_2.choices[0].delta.tool_calls[0].id = None
mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock()
mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None
mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}'
mock_chunk_2.choices[0].finish_reason = None
mock_chunk_2.usage = None
mock_chunk_2.id = "chatcmpl-1"
mock_chunk_3 = MagicMock()
mock_chunk_3.choices = [MagicMock()]
mock_chunk_3.choices[0].delta = MagicMock()
mock_chunk_3.choices[0].delta.content = None
mock_chunk_3.choices[0].delta.tool_calls = None
mock_chunk_3.choices[0].finish_reason = "tool_calls"
mock_chunk_3.usage = MagicMock()
mock_chunk_3.usage.prompt_tokens = 10
mock_chunk_3.usage.completion_tokens = 5
mock_chunk_3.id = "chatcmpl-1"
with patch.object(
llm.client.chat.completions, "create", return_value=iter([mock_chunk_1, mock_chunk_2, mock_chunk_3])
):
result = llm.call(
messages=[{"role": "user", "content": "Calculate 1+1"}],
tools=[{
"type": "function",
"function": {
"name": "calculator",
"description": "Calculate expression",
"parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
},
}],
available_functions=None,
)
assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
assert len(result) == 1
assert result[0]["function"]["name"] == "calculator"
assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
assert result[0]["id"] == "call_abc123"
assert result[0]["type"] == "function"
@pytest.mark.asyncio
async def test_openai_async_streaming_returns_tool_calls_without_available_functions():
    """Test that async streaming returns tool calls list when available_functions is None.
    Same as the sync test but for the async path (_ahandle_streaming_completion).
    """
    llm = LLM(model="openai/gpt-4o-mini", stream=True)

    def make_tool_call_delta(call_id, fn_name, fn_args):
        # Build a delta carrying a single partial tool call at index 0.
        delta = MagicMock()
        delta.content = None
        delta.tool_calls = [MagicMock()]
        delta.tool_calls[0].index = 0
        delta.tool_calls[0].id = call_id
        delta.tool_calls[0].function = MagicMock()
        delta.tool_calls[0].function.name = fn_name
        delta.tool_calls[0].function.arguments = fn_args
        return delta

    def make_chunk(delta, finish_reason=None, usage=None):
        # Wrap a delta in a chat-completion chunk with a stable stream id.
        chunk = MagicMock()
        chunk.choices = [MagicMock()]
        chunk.choices[0].delta = delta
        chunk.choices[0].finish_reason = finish_reason
        chunk.usage = usage
        chunk.id = "chatcmpl-1"
        return chunk

    # Final chunk: no delta payload, just the finish reason and token usage.
    closing_delta = MagicMock()
    closing_delta.content = None
    closing_delta.tool_calls = None
    final_usage = MagicMock()
    final_usage.prompt_tokens = 10
    final_usage.completion_tokens = 5

    # The tool-call arguments arrive split across two chunks and must be
    # reassembled into '{"expression": "1+1"}' by the streaming handler.
    stream_chunks = [
        make_chunk(make_tool_call_delta("call_abc123", "calculator", '{"expr')),
        make_chunk(make_tool_call_delta(None, None, 'ession": "1+1"}')),
        make_chunk(closing_delta, finish_reason="tool_calls", usage=final_usage),
    ]

    class MockAsyncStream:
        """Async iterator that mimics OpenAI's async streaming response."""

        def __init__(self, chunks: list[Any]) -> None:
            self._remaining = list(chunks)

        def __aiter__(self) -> "MockAsyncStream":
            return self

        async def __anext__(self) -> Any:
            if not self._remaining:
                raise StopAsyncIteration
            return self._remaining.pop(0)

    async def mock_create(**kwargs: Any) -> MockAsyncStream:
        return MockAsyncStream(stream_chunks)

    with patch.object(
        llm.async_client.chat.completions, "create", side_effect=mock_create
    ):
        result = await llm.acall(
            messages=[{"role": "user", "content": "Calculate 1+1"}],
            tools=[{
                "type": "function",
                "function": {
                    "name": "calculator",
                    "description": "Calculate expression",
                    "parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
                },
            }],
            available_functions=None,
        )
    assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
    assert len(result) == 1
    assert result[0]["function"]["name"] == "calculator"
    assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
    assert result[0]["id"] == "call_abc123"
    assert result[0]["type"] == "function"

View File

@@ -1,155 +0,0 @@
"""Tests to validate markdown table formatting in documentation files.
These tests ensure that markdown tables in the documentation are well-formed
and render correctly. Specifically, they check that:
- All rows have the same number of columns as the header
- Pipe characters inside table cells are properly escaped
- Separator rows match the header column count
"""
from __future__ import annotations
import re
from pathlib import Path
import pytest
# Root of the documentation tree, resolved relative to this test file.
# NOTE(review): assumes docs/ sits four path components above this file
# (parents[3]) — confirm against the repository layout.
DOCS_DIR = Path(__file__).resolve().parents[3] / "docs"
# Documentation pages (relative to DOCS_DIR) whose markdown tables these
# tests validate — the same tasks page in each available translation.
DOCS_TABLE_FILES = [
    "en/concepts/tasks.mdx",
    "pt-BR/concepts/tasks.mdx",
    "ko/concepts/tasks.mdx",
]
def _split_table_row(line: str) -> list[str]:
"""Split a markdown table row on unescaped pipe characters.
Escaped pipes (``\\|``) are preserved as literal ``|`` inside cells.
"""
cells = re.split(r"(?<!\\)\|", line)
return [cell.replace("\\|", "|").strip() for cell in cells]
def _parse_markdown_tables(content: str) -> list[tuple[int, list[list[str]]]]:
    """Parse all markdown tables from content.
    Returns a list of (start_line_number, table_rows) tuples.
    Each table_rows is a list of rows, where each row is a list of cell values.
    """
    tables: list[tuple[int, list[list[str]]]] = []
    rows: list[list[str]] = []
    start_line = 0
    # Line numbers reported to the caller are 1-based.
    for line_no, raw_line in enumerate(content.split("\n"), start=1):
        text = raw_line.strip()
        if text.startswith("|") and text.endswith("|"):
            if not rows:
                start_line = line_no
            # Drop the empty leading/trailing cells produced by the outer pipes.
            rows.append(_split_table_row(text)[1:-1])
        elif rows:
            # A non-table line terminates the current table.
            tables.append((start_line, rows))
            rows = []
    if rows:
        # Flush a table that runs to the end of the content.
        tables.append((start_line, rows))
    return tables
def _is_separator_row(cells: list[str]) -> bool:
"""Check if a row is a table separator (e.g., | :--- | :--- |)."""
return all(re.match(r"^:?-+:?$", cell.strip()) for cell in cells if cell.strip())
@pytest.mark.parametrize("doc_path", DOCS_TABLE_FILES)
def test_markdown_tables_have_consistent_columns(doc_path: str) -> None:
    """Verify all rows in each markdown table have the same number of columns."""
    full_path = DOCS_DIR / doc_path
    if not full_path.exists():
        pytest.skip(f"Doc file not found: {full_path}")
    content = full_path.read_text(encoding="utf-8")
    for table_start, rows in _parse_markdown_tables(content):
        # A lone row is not a table worth validating.
        if len(rows) < 2:
            continue
        header_col_count = len(rows[0])
        # Every body row (including the separator) must match the header width.
        for row_idx, row in enumerate(rows[1:], start=1):
            assert len(row) == header_col_count, (
                f"Table at line {table_start} in {doc_path}: "
                f"row {row_idx + 1} has {len(row)} columns, expected {header_col_count}. "
                f"Row content: {'|'.join(row)}"
            )
@pytest.mark.parametrize("doc_path", DOCS_TABLE_FILES)
def test_task_attributes_table_has_no_unescaped_pipes_in_cells(doc_path: str) -> None:
    """Verify the Task Attributes table doesn't have unescaped pipe chars in cells.
    The '|' character is the column delimiter in markdown tables. If a type
    annotation like `List[Callable] | List[str]` contains an unescaped pipe,
    it will be interpreted as a column separator and break the table layout.
    """
    full_path = DOCS_DIR / doc_path
    if not full_path.exists():
        pytest.skip(f"Doc file not found: {full_path}")
    lines = full_path.read_text(encoding="utf-8").split("\n")
    inside_section = False
    for line_no, line in enumerate(lines, start=1):
        # Section start markers for the English and pt-BR/ko pages.
        if "## Task Attributes" in line or "## Atributos da Tarefa" in line:
            inside_section = True
            continue
        if not inside_section:
            continue
        if line.startswith("##"):
            # Next heading ends the Task Attributes section.
            break
        text = line.strip()
        if not (text.startswith("|") and text.endswith("|")):
            continue
        # Naive split on every '|': an escaped pipe would still break the
        # rendered table, so it must show up as an extra cell boundary here.
        cells = text.split("|")[1:-1]
        if _is_separator_row(cells):
            continue
        for cell_idx, cell in enumerate(cells):
            unescaped_pipes = re.findall(r"(?<!\\)\|", cell)
            assert not unescaped_pipes, (
                f"Line {line_no} in {doc_path}, cell {cell_idx + 1}: "
                f"found unescaped pipe character in cell content: '{cell.strip()}'. "
                f"Use '\\|' to escape pipe characters inside table cells."
            )
@pytest.mark.parametrize("doc_path", DOCS_TABLE_FILES)
def test_task_attributes_table_separator_matches_header(doc_path: str) -> None:
    """Verify the separator row has the same number of columns as the header."""
    full_path = DOCS_DIR / doc_path
    if not full_path.exists():
        pytest.skip(f"Doc file not found: {full_path}")
    content = full_path.read_text(encoding="utf-8")
    for table_start, rows in _parse_markdown_tables(content):
        if len(rows) < 2:
            continue
        header, separator = rows[0], rows[1]
        # Only validate when row 2 actually is a separator row.
        if not _is_separator_row(separator):
            continue
        assert len(separator) == len(header), (
            f"Table at line {table_start} in {doc_path}: "
            f"separator has {len(separator)} columns but header has {len(header)}. "
            f"This usually means the header or separator row has extra '|' delimiters."
        )

View File

@@ -6,7 +6,7 @@ readme = "README.md"
authors = [ authors = [
{ name = "Greyson R. LaLonde", email = "greyson@crewai.com" }, { name = "Greyson R. LaLonde", email = "greyson@crewai.com" },
] ]
requires-python = ">=3.10, <3.14" requires-python = ">=3.10,<3.15"
classifiers = ["Private :: Do Not Upload"] classifiers = ["Private :: Do Not Upload"]
private = true private = true
dependencies = [ dependencies = [

View File

@@ -1,7 +1,7 @@
name = "crewai-workspace" name = "crewai-workspace"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks." description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md" readme = "README.md"
requires-python = ">=3.10,<3.14" requires-python = ">=3.10,<3.15"
authors = [ authors = [
{ name = "Joao Moura", email = "joao@crewai.com" } { name = "Joao Moura", email = "joao@crewai.com" }
] ]
@@ -143,6 +143,11 @@ python_classes = "Test*"
python_functions = "test_*" python_functions = "test_*"
[tool.uv]
constraint-dependencies = [
"onnxruntime<1.24; python_version < '3.11'",
]
[tool.uv.workspace] [tool.uv.workspace]
members = [ members = [
"lib/crewai", "lib/crewai",

4635
uv.lock generated

File diff suppressed because it is too large Load Diff