chore: merge main into gl/chore/refactor-cli

Resolve conflicts from origin/main: relocate new CLI additions
(checkpoint_tui, deploy/validate, remote_template, content_crew
templates) into lib/cli, rewrite imports for the standalone
crewai-cli package, port main's trained_agents_file param and
predeploy validation, and bump python-dotenv/pydantic in
crewai-cli to match crewai's constraints. Add the new
mark_ephemeral_trace_batch_as_failed method to the relocated
crewai.plus_api. Update tests for the new tools_metadata payload
field, the deploy --skip-validate kwarg, and the crewai_cli import
paths.
Greyson Lalonde
2026-05-05 01:50:34 +08:00
1014 changed files with 176672 additions and 17289 deletions
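
For orientation, the test assertions below imply roughly the following interaction between the ported predeploy validation and the new skip_validate kwarg. This is a minimal sketch, not the shipped implementation: validate_project, the report's ok attribute, deploy_by_uuid, and deploy_by_name are the names exercised by the tests; everything else is hypothetical wiring.

    # Sketch only -- inferred from the test assertions in this commit.
    class DeployCommand:
        def deploy(self, uuid=None, skip_validate=False):
            if not skip_validate:
                report = validate_project(self.project_root)  # hypothetical call site
                if not report.ok:
                    return  # abort before any Plus API call
            if uuid:
                response = self.plus_api_client.deploy_by_uuid(uuid)
            else:
                response = self.plus_api_client.deploy_by_name(self.project_name)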

View File

@@ -15,7 +15,14 @@ class TestDeployCommand(unittest.TestCase):
     @patch("crewai_cli.command.get_auth_token")
     @patch("crewai_cli.deploy.main.get_project_name")
     @patch("crewai_cli.command.PlusAPI")
-    def setUp(self, mock_plus_api, mock_get_project_name, mock_get_auth_token):
+    @patch.object(DeployCommand, "_validate_project_structure")
+    def setUp(
+        self,
+        mock_validate_structure,
+        mock_plus_api,
+        mock_get_project_name,
+        mock_get_auth_token,
+    ):
         self.mock_get_auth_token = mock_get_auth_token
         self.mock_get_project_name = mock_get_project_name
         self.mock_plus_api = mock_plus_api
@@ -125,7 +132,7 @@ class TestDeployCommand(unittest.TestCase):
         mock_response.json.return_value = {"uuid": "test-uuid"}
         self.mock_client.deploy_by_uuid.return_value = mock_response
-        self.deploy_command.deploy(uuid="test-uuid")
+        self.deploy_command.deploy(uuid="test-uuid", skip_validate=True)
         self.mock_client.deploy_by_uuid.assert_called_once_with("test-uuid")
         mock_display.assert_called_once_with({"uuid": "test-uuid"})
@@ -137,7 +144,7 @@ class TestDeployCommand(unittest.TestCase):
         mock_response.json.return_value = {"uuid": "test-uuid"}
         self.mock_client.deploy_by_name.return_value = mock_response
-        self.deploy_command.deploy()
+        self.deploy_command.deploy(skip_validate=True)
         self.mock_client.deploy_by_name.assert_called_once_with("test_project")
         mock_display.assert_called_once_with({"uuid": "test-uuid"})
@@ -156,7 +163,7 @@ class TestDeployCommand(unittest.TestCase):
         self.mock_client.create_crew.return_value = mock_response
         with patch("sys.stdout", new=StringIO()) as fake_out:
-            self.deploy_command.create_crew()
+            self.deploy_command.create_crew(skip_validate=True)
         self.assertIn("Deployment created successfully!", fake_out.getvalue())
         self.assertIn("new-uuid", fake_out.getvalue())

View File

@@ -0,0 +1,431 @@
"""Tests for `crewai.cli.deploy.validate`.
The fixtures here correspond 1:1 to the deployment-failure patterns observed
in the #crewai-deployment-failures Slack channel that motivated this work.
"""
from __future__ import annotations
from pathlib import Path
from textwrap import dedent
from typing import Iterable
from unittest.mock import patch
import pytest
from crewai_cli.deploy.validate import (
DeployValidator,
Severity,
normalize_package_name,
)
def _make_pyproject(
name: str = "my_crew",
dependencies: Iterable[str] = ("crewai>=1.14.0",),
*,
hatchling: bool = False,
flow: bool = False,
extra: str = "",
) -> str:
deps = ", ".join(f'"{d}"' for d in dependencies)
lines = [
"[project]",
f'name = "{name}"',
'version = "0.1.0"',
f"dependencies = [{deps}]",
]
if hatchling:
lines += [
"",
"[build-system]",
'requires = ["hatchling"]',
'build-backend = "hatchling.build"',
]
if flow:
lines += ["", "[tool.crewai]", 'type = "flow"']
if extra:
lines += ["", extra]
return "\n".join(lines) + "\n"
def _scaffold_standard_crew(
root: Path,
*,
name: str = "my_crew",
include_crew_py: bool = True,
include_agents_yaml: bool = True,
include_tasks_yaml: bool = True,
include_lockfile: bool = True,
pyproject: str | None = None,
) -> Path:
(root / "pyproject.toml").write_text(pyproject or _make_pyproject(name=name))
if include_lockfile:
(root / "uv.lock").write_text("# dummy uv lockfile\n")
pkg_dir = root / "src" / normalize_package_name(name)
pkg_dir.mkdir(parents=True)
(pkg_dir / "__init__.py").write_text("")
if include_crew_py:
(pkg_dir / "crew.py").write_text(
dedent(
"""
from crewai.project import CrewBase, crew
@CrewBase
class MyCrew:
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
@crew
def crew(self):
from crewai import Crew
return Crew(agents=[], tasks=[])
"""
).strip()
+ "\n"
)
config_dir = pkg_dir / "config"
config_dir.mkdir()
if include_agents_yaml:
(config_dir / "agents.yaml").write_text("{}\n")
if include_tasks_yaml:
(config_dir / "tasks.yaml").write_text("{}\n")
return pkg_dir
def _codes(validator: DeployValidator) -> set[str]:
return {r.code for r in validator.results}
def _run_without_import_check(root: Path) -> DeployValidator:
"""Run validation with the subprocess-based import check stubbed out;
the classifier is exercised directly in its own tests below."""
with patch.object(DeployValidator, "_check_module_imports", lambda self: None):
v = DeployValidator(project_root=root)
v.run()
return v
@pytest.mark.parametrize(
"project_name, expected",
[
("my-crew", "my_crew"),
("My Cool-Project", "my_cool_project"),
("crew123", "crew123"),
("crew.name!with$chars", "crewnamewithchars"),
],
)
def test_normalize_package_name(project_name: str, expected: str) -> None:
assert normalize_package_name(project_name) == expected
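# For reference, a sketch that satisfies the four cases above (illustration
# only; the real normalize_package_name lives in crewai_cli.deploy.validate
# and may differ):
#
#     name = re.sub(r"[-\s]+", "_", project_name.lower())  # hyphens/spaces -> _
#     return re.sub(r"[^a-z0-9_]", "", name)               # drop other punctuation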
def test_valid_standard_crew_project_passes(tmp_path: Path) -> None:
_scaffold_standard_crew(tmp_path)
v = _run_without_import_check(tmp_path)
assert v.ok, f"expected clean run, got {v.results}"
def test_missing_pyproject_errors(tmp_path: Path) -> None:
v = _run_without_import_check(tmp_path)
assert "missing_pyproject" in _codes(v)
assert not v.ok
def test_invalid_pyproject_errors(tmp_path: Path) -> None:
(tmp_path / "pyproject.toml").write_text("this is not valid toml ====\n")
v = _run_without_import_check(tmp_path)
assert "invalid_pyproject" in _codes(v)
def test_missing_project_name_errors(tmp_path: Path) -> None:
(tmp_path / "pyproject.toml").write_text(
'[project]\nversion = "0.1.0"\ndependencies = ["crewai>=1.14.0"]\n'
)
v = _run_without_import_check(tmp_path)
assert "missing_project_name" in _codes(v)
def test_missing_lockfile_errors(tmp_path: Path) -> None:
_scaffold_standard_crew(tmp_path, include_lockfile=False)
v = _run_without_import_check(tmp_path)
assert "missing_lockfile" in _codes(v)
def test_poetry_lock_is_accepted(tmp_path: Path) -> None:
_scaffold_standard_crew(tmp_path, include_lockfile=False)
(tmp_path / "poetry.lock").write_text("# poetry lockfile\n")
v = _run_without_import_check(tmp_path)
assert "missing_lockfile" not in _codes(v)
def test_stale_lockfile_warns(tmp_path: Path) -> None:
_scaffold_standard_crew(tmp_path)
# Make lockfile older than pyproject.
lock = tmp_path / "uv.lock"
pyproject = tmp_path / "pyproject.toml"
old_time = pyproject.stat().st_mtime - 60
import os
os.utime(lock, (old_time, old_time))
v = _run_without_import_check(tmp_path)
assert "stale_lockfile" in _codes(v)
# Stale is a warning, so the run can still be ok (no errors).
assert v.ok
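# Presumably DeployValidator.ok reduces to "no ERROR-severity findings", e.g.
#     ok = not any(r.severity is Severity.ERROR for r in self.results)
# which is why a WARNING such as stale_lockfile leaves the project deployable.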
def test_missing_package_dir_errors(tmp_path: Path) -> None:
# pyproject says name=my_crew but we only create src/other_pkg/
(tmp_path / "pyproject.toml").write_text(_make_pyproject(name="my_crew"))
(tmp_path / "uv.lock").write_text("")
(tmp_path / "src" / "other_pkg").mkdir(parents=True)
v = _run_without_import_check(tmp_path)
codes = _codes(v)
assert "missing_package_dir" in codes
finding = next(r for r in v.results if r.code == "missing_package_dir")
assert "other_pkg" in finding.hint
def test_egg_info_only_errors_with_targeted_hint(tmp_path: Path) -> None:
"""Regression for the case where only src/<name>.egg-info/ exists."""
(tmp_path / "pyproject.toml").write_text(_make_pyproject(name="odoo_pm_agents"))
(tmp_path / "uv.lock").write_text("")
(tmp_path / "src" / "odoo_pm_agents.egg-info").mkdir(parents=True)
v = _run_without_import_check(tmp_path)
finding = next(r for r in v.results if r.code == "missing_package_dir")
assert "egg-info" in finding.hint
def test_stale_egg_info_sibling_warns(tmp_path: Path) -> None:
_scaffold_standard_crew(tmp_path)
(tmp_path / "src" / "my_crew.egg-info").mkdir()
v = _run_without_import_check(tmp_path)
assert "stale_egg_info" in _codes(v)
def test_missing_crew_py_errors(tmp_path: Path) -> None:
_scaffold_standard_crew(tmp_path, include_crew_py=False)
v = _run_without_import_check(tmp_path)
assert "missing_crew_py" in _codes(v)
def test_missing_agents_yaml_errors(tmp_path: Path) -> None:
_scaffold_standard_crew(tmp_path, include_agents_yaml=False)
v = _run_without_import_check(tmp_path)
assert "missing_agents_yaml" in _codes(v)
def test_missing_tasks_yaml_errors(tmp_path: Path) -> None:
_scaffold_standard_crew(tmp_path, include_tasks_yaml=False)
v = _run_without_import_check(tmp_path)
assert "missing_tasks_yaml" in _codes(v)
def test_flow_project_requires_main_py(tmp_path: Path) -> None:
(tmp_path / "pyproject.toml").write_text(
_make_pyproject(name="my_flow", flow=True)
)
(tmp_path / "uv.lock").write_text("")
(tmp_path / "src" / "my_flow").mkdir(parents=True)
v = _run_without_import_check(tmp_path)
assert "missing_flow_main" in _codes(v)
def test_flow_project_with_main_py_passes(tmp_path: Path) -> None:
(tmp_path / "pyproject.toml").write_text(
_make_pyproject(name="my_flow", flow=True)
)
(tmp_path / "uv.lock").write_text("")
pkg = tmp_path / "src" / "my_flow"
pkg.mkdir(parents=True)
(pkg / "main.py").write_text("# flow entrypoint\n")
v = _run_without_import_check(tmp_path)
assert "missing_flow_main" not in _codes(v)
def test_hatchling_without_wheel_config_passes_when_pkg_dir_matches(
tmp_path: Path,
) -> None:
_scaffold_standard_crew(
tmp_path, pyproject=_make_pyproject(name="my_crew", hatchling=True)
)
v = _run_without_import_check(tmp_path)
# src/my_crew/ exists, so hatch default should find it — no wheel error.
assert "hatch_wheel_target_missing" not in _codes(v)
def test_hatchling_with_explicit_wheel_config_passes(tmp_path: Path) -> None:
extra = (
"[tool.hatch.build.targets.wheel]\n"
'packages = ["src/my_crew"]'
)
_scaffold_standard_crew(
tmp_path,
pyproject=_make_pyproject(name="my_crew", hatchling=True, extra=extra),
)
v = _run_without_import_check(tmp_path)
assert "hatch_wheel_target_missing" not in _codes(v)
def test_classify_missing_openai_key_is_warning(tmp_path: Path) -> None:
v = DeployValidator(project_root=tmp_path)
v._classify_import_error(
"ImportError",
"Error importing native provider: 1 validation error for OpenAICompletion\n"
" Value error, OPENAI_API_KEY is required",
tb="",
)
assert len(v.results) == 1
result = v.results[0]
assert result.code == "llm_init_missing_key"
assert result.severity is Severity.WARNING
assert "OPENAI_API_KEY" in result.title
def test_classify_azure_extra_missing_is_error(tmp_path: Path) -> None:
"""The real message raised by the Azure provider module uses plain
double quotes around the install command (no backticks). Match the
exact string that ships in the provider source so this test actually
guards the regex used in production."""
v = DeployValidator(project_root=tmp_path)
v._classify_import_error(
"ImportError",
'Azure AI Inference native provider not available, to install: uv add "crewai[azure-ai-inference]"',
tb="",
)
assert "missing_provider_extra" in _codes(v)
finding = next(r for r in v.results if r.code == "missing_provider_extra")
assert finding.title.startswith("Azure AI Inference")
assert 'uv add "crewai[azure-ai-inference]"' in finding.hint
@pytest.mark.parametrize(
"pkg_label, install_cmd",
[
("Anthropic", 'uv add "crewai[anthropic]"'),
("AWS Bedrock", 'uv add "crewai[bedrock]"'),
("Google Gen AI", 'uv add "crewai[google-genai]"'),
],
)
def test_classify_missing_provider_extra_matches_real_messages(
tmp_path: Path, pkg_label: str, install_cmd: str
) -> None:
"""Regression for the four provider error strings verbatim."""
v = DeployValidator(project_root=tmp_path)
v._classify_import_error(
"ImportError",
f"{pkg_label} native provider not available, to install: {install_cmd}",
tb="",
)
assert "missing_provider_extra" in _codes(v)
finding = next(r for r in v.results if r.code == "missing_provider_extra")
assert install_cmd in finding.hint
def test_classify_keyerror_at_import_is_warning(tmp_path: Path) -> None:
"""Regression for `KeyError: 'SERPLY_API_KEY'` raised at import time."""
v = DeployValidator(project_root=tmp_path)
v._classify_import_error("KeyError", "'SERPLY_API_KEY'", tb="")
codes = _codes(v)
assert "env_var_read_at_import" in codes
def test_classify_no_crewbase_class_is_error(tmp_path: Path) -> None:
v = DeployValidator(project_root=tmp_path)
v._classify_import_error(
"ValueError",
"Crew class annotated with @CrewBase not found.",
tb="",
)
assert "no_crewbase_class" in _codes(v)
def test_classify_no_flow_subclass_is_error(tmp_path: Path) -> None:
v = DeployValidator(project_root=tmp_path)
v._classify_import_error("ValueError", "No Flow subclass found in the module.", tb="")
assert "no_flow_subclass" in _codes(v)
def test_classify_stale_crewai_pin_attribute_error(tmp_path: Path) -> None:
"""Regression for a stale crewai pin missing `_load_response_format`."""
v = DeployValidator(project_root=tmp_path)
v._classify_import_error(
"AttributeError",
"'EmploymentServiceDecisionSupportSystemCrew' object has no attribute '_load_response_format'",
tb="",
)
assert "stale_crewai_pin" in _codes(v)
def test_classify_unknown_error_is_fallback(tmp_path: Path) -> None:
v = DeployValidator(project_root=tmp_path)
v._classify_import_error("RuntimeError", "something weird happened", tb="")
assert "import_failed" in _codes(v)
def test_env_var_referenced_but_missing_warns(tmp_path: Path) -> None:
pkg = _scaffold_standard_crew(tmp_path)
(pkg / "tools.py").write_text(
'import os\nkey = os.getenv("TAVILY_API_KEY")\n'
)
import os
# Make sure the test doesn't inherit the key from the host environment.
with patch.dict(os.environ, {}, clear=False):
os.environ.pop("TAVILY_API_KEY", None)
v = _run_without_import_check(tmp_path)
codes = _codes(v)
assert "env_vars_not_in_dotenv" in codes
def test_env_var_in_dotenv_does_not_warn(tmp_path: Path) -> None:
pkg = _scaffold_standard_crew(tmp_path)
(pkg / "tools.py").write_text(
'import os\nkey = os.getenv("TAVILY_API_KEY")\n'
)
(tmp_path / ".env").write_text("TAVILY_API_KEY=abc\n")
v = _run_without_import_check(tmp_path)
assert "env_vars_not_in_dotenv" not in _codes(v)
def test_old_crewai_pin_in_uv_lock_warns(tmp_path: Path) -> None:
_scaffold_standard_crew(tmp_path)
(tmp_path / "uv.lock").write_text(
'name = "crewai"\nversion = "1.10.0"\nsource = { registry = "..." }\n'
)
v = _run_without_import_check(tmp_path)
assert "old_crewai_pin" in _codes(v)
def test_modern_crewai_pin_does_not_warn(tmp_path: Path) -> None:
_scaffold_standard_crew(tmp_path)
(tmp_path / "uv.lock").write_text(
'name = "crewai"\nversion = "1.14.1"\nsource = { registry = "..." }\n'
)
v = _run_without_import_check(tmp_path)
assert "old_crewai_pin" not in _codes(v)
def test_create_crew_aborts_on_validation_error(tmp_path: Path) -> None:
"""`crewai deploy create` must not contact the API when validation fails."""
from unittest.mock import MagicMock, patch as mock_patch
from crewai_cli.deploy.main import DeployCommand
with (
mock_patch("crewai_cli.command.get_auth_token", return_value="tok"),
mock_patch("crewai_cli.deploy.main.get_project_name", return_value="p"),
mock_patch("crewai_cli.command.PlusAPI") as mock_api,
mock_patch.object(DeployCommand, "_validate_project_structure"),
mock_patch(
"crewai_cli.deploy.main.validate_project"
) as mock_validate,
):
mock_validate.return_value = MagicMock(ok=False)
cmd = DeployCommand()
cmd.create_crew()
assert not cmd.plus_api_client.create_crew.called
del mock_api # silence unused-var lint

View File

@@ -93,7 +93,7 @@ def test_version_command_with_tools(runner):
 def test_test_default_iterations(evaluate_crew, runner):
     result = runner.invoke(test)
-    evaluate_crew.assert_called_once_with(3, "gpt-4o-mini")
+    evaluate_crew.assert_called_once_with(3, "gpt-4o-mini", trained_agents_file=None)
     assert result.exit_code == 0
     assert "Testing the crew for 3 iterations with model gpt-4o-mini" in result.output
@@ -102,7 +102,7 @@ def test_test_default_iterations(evaluate_crew, runner):
 def test_test_custom_iterations(evaluate_crew, runner):
     result = runner.invoke(test, ["--n_iterations", "5", "--model", "gpt-4o"])
-    evaluate_crew.assert_called_once_with(5, "gpt-4o")
+    evaluate_crew.assert_called_once_with(5, "gpt-4o", trained_agents_file=None)
     assert result.exit_code == 0
     assert "Testing the crew for 5 iterations with model gpt-4o" in result.output
@@ -153,7 +153,7 @@ def test_deploy_push(command, runner):
     result = runner.invoke(deploy_push, ["-u", uuid])
     assert result.exit_code == 0
-    mock_deploy.deploy.assert_called_once_with(uuid=uuid)
+    mock_deploy.deploy.assert_called_once_with(uuid=uuid, skip_validate=False)


 @mock.patch("crewai_cli.cli.DeployCommand")
@@ -162,7 +162,7 @@ def test_deploy_push_no_uuid(command, runner):
     result = runner.invoke(deploy_push)
     assert result.exit_code == 0
-    mock_deploy.deploy.assert_called_once_with(uuid=None)
+    mock_deploy.deploy.assert_called_once_with(uuid=None, skip_validate=False)


 @mock.patch("crewai_cli.cli.DeployCommand")

View File

@@ -27,6 +27,7 @@ def test_crew_success(mock_subprocess_run, n_iterations, model):
         capture_output=False,
         text=True,
         check=True,
+        env=mock.ANY,
     )
     assert result is None
@@ -66,6 +67,7 @@ def test_test_crew_called_process_error(mock_subprocess_run, click):
         capture_output=False,
         text=True,
         check=True,
+        env=mock.ANY,
     )
     click.echo.assert_has_calls(
         [
@@ -91,7 +93,30 @@ def test_test_crew_unexpected_exception(mock_subprocess_run, click):
         capture_output=False,
         text=True,
         check=True,
+        env=mock.ANY,
     )
     click.echo.assert_called_once_with(
         "An unexpected error occurred: Unexpected error", err=True
     )
+
+
+@mock.patch("crewai_cli.evaluate_crew.subprocess.run")
+def test_evaluate_crew_sets_trained_agents_env_var(mock_subprocess_run):
+    mock_subprocess_run.return_value = subprocess.CompletedProcess(
+        args=["uv", "run", "test", "1", "gpt-4o"], returncode=0
+    )
+    evaluate_crew.evaluate_crew(1, "gpt-4o", trained_agents_file="my_custom.pkl")
+    _, kwargs = mock_subprocess_run.call_args
+    assert kwargs["env"]["CREWAI_TRAINED_AGENTS_FILE"] == "my_custom.pkl"
+
+
+@mock.patch("crewai_cli.evaluate_crew.subprocess.run")
+def test_evaluate_crew_omits_env_var_without_filename(mock_subprocess_run):
+    mock_subprocess_run.return_value = subprocess.CompletedProcess(
+        args=["uv", "run", "test", "1", "gpt-4o"], returncode=0
+    )
+    evaluate_crew.evaluate_crew(1, "gpt-4o")
+    _, kwargs = mock_subprocess_run.call_args
+    assert "CREWAI_TRAINED_AGENTS_FILE" not in kwargs["env"]

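The env-var threading asserted by the two new tests reduces to something like the following sketch (an assumption: the real crewai_cli.evaluate_crew body may differ in arguments and error handling):

    import os
    import subprocess

    def evaluate_crew(n_iterations, model, trained_agents_file=None):
        env = os.environ.copy()
        if trained_agents_file:
            # Set only when a filename is given, per the omission test above.
            env["CREWAI_TRAINED_AGENTS_FILE"] = trained_agents_file
        subprocess.run(
            ["uv", "run", "test", str(n_iterations), model],
            capture_output=False,
            text=True,
            check=True,
            env=env,
        )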
View File

@@ -124,6 +124,7 @@ class TestPlusAPI(unittest.TestCase):
             "file": encoded_file,
             "description": description,
             "available_exports": None,
+            "tools_metadata": None,
         }
         mock_make_request.assert_called_once_with(
             "POST", "/crewai_plus/api/v1/tools", json=params
@@ -161,6 +162,7 @@ class TestPlusAPI(unittest.TestCase):
             "file": encoded_file,
             "description": description,
             "available_exports": None,
+            "tools_metadata": None,
         }
         self.assert_request_with_org_id(
@@ -189,6 +191,7 @@ class TestPlusAPI(unittest.TestCase):
             "file": encoded_file,
             "description": description,
             "available_exports": None,
+            "tools_metadata": None,
         }
         mock_make_request.assert_called_once_with(
             "POST", "/crewai_plus/api/v1/tools", json=params

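Assembled from the assertions above, the publish payload now carries the new tools_metadata field alongside available_exports. A sketch of the client side follows; the handle/is_public/version keys and the _make_request helper are assumptions based on the mocked names, not confirmed source:

    def publish_tool(self, handle, is_public, version, description,
                     encoded_file, available_exports=None, tools_metadata=None):
        params = {
            "handle": handle,        # assumed; only the last four keys are
            "is_public": is_public,  # asserted verbatim in the tests above
            "version": version,
            "file": encoded_file,
            "description": description,
            "available_exports": available_exports,
            "tools_metadata": tools_metadata,  # new field in this merge
        }
        return self._make_request("POST", "/crewai_plus/api/v1/tools", json=params)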
View File

@@ -187,9 +187,14 @@ def test_publish_when_not_in_sync(mock_is_synced, mock_fetch, capsys, tool_command):
     "crewai_cli.tools.main.extract_available_exports",
     return_value=[{"name": "SampleTool"}],
 )
+@patch(
+    "crewai_cli.tools.main.extract_tools_metadata",
+    return_value=[{"name": "SampleTool", "humanized_name": "sample_tool", "description": "A sample tool", "run_params_schema": {}, "init_params_schema": {}, "env_vars": []}],
+)
 @patch("crewai_cli.tools.main.ToolCommand._print_current_organization")
 def test_publish_when_not_in_sync_and_force(
     mock_print_org,
+    mock_tools_metadata,
     mock_available_exports,
     mock_is_synced,
     mock_publish,
@@ -216,6 +221,7 @@
         ["uv", "build", "--sdist", "--out-dir", unittest.mock.ANY],
         check=True,
         capture_output=False,
+        env=unittest.mock.ANY,
     )
     mock_open.assert_called_with(unittest.mock.ANY, "rb")
     mock_publish.assert_called_with(
@@ -225,6 +231,7 @@
         description="A sample tool",
         encoded_file=unittest.mock.ANY,
         available_exports=[{"name": "SampleTool"}],
+        tools_metadata=[{"name": "SampleTool", "humanized_name": "sample_tool", "description": "A sample tool", "run_params_schema": {}, "init_params_schema": {}, "env_vars": []}],
     )
     mock_print_org.assert_called_once()
@@ -246,7 +253,12 @@ def test_publish_when_not_in_sync_and_force(
     "crewai_cli.tools.main.extract_available_exports",
     return_value=[{"name": "SampleTool"}],
 )
+@patch(
+    "crewai_cli.tools.main.extract_tools_metadata",
+    return_value=[{"name": "SampleTool", "humanized_name": "sample_tool", "description": "A sample tool", "run_params_schema": {}, "init_params_schema": {}, "env_vars": []}],
+)
 def test_publish_success(
+    mock_tools_metadata,
     mock_available_exports,
     mock_is_synced,
     mock_publish,
@@ -273,6 +285,7 @@
         ["uv", "build", "--sdist", "--out-dir", unittest.mock.ANY],
         check=True,
         capture_output=False,
+        env=unittest.mock.ANY,
     )
     mock_open.assert_called_with(unittest.mock.ANY, "rb")
     mock_publish.assert_called_with(
@@ -282,6 +295,7 @@
         description="A sample tool",
         encoded_file=unittest.mock.ANY,
         available_exports=[{"name": "SampleTool"}],
+        tools_metadata=[{"name": "SampleTool", "humanized_name": "sample_tool", "description": "A sample tool", "run_params_schema": {}, "init_params_schema": {}, "env_vars": []}],
     )
@@ -300,7 +314,12 @@
     "crewai_cli.tools.main.extract_available_exports",
     return_value=[{"name": "SampleTool"}],
 )
+@patch(
+    "crewai_cli.tools.main.extract_tools_metadata",
+    return_value=[{"name": "SampleTool", "humanized_name": "sample_tool", "description": "A sample tool", "run_params_schema": {}, "init_params_schema": {}, "env_vars": []}],
+)
 def test_publish_failure(
+    mock_tools_metadata,
     mock_available_exports,
     mock_publish,
     mock_open,
@@ -341,7 +360,12 @@
     "crewai_cli.tools.main.extract_available_exports",
     return_value=[{"name": "SampleTool"}],
 )
+@patch(
+    "crewai_cli.tools.main.extract_tools_metadata",
+    return_value=[{"name": "SampleTool", "humanized_name": "sample_tool", "description": "A sample tool", "run_params_schema": {}, "init_params_schema": {}, "env_vars": []}],
+)
 def test_publish_api_error(
+    mock_tools_metadata,
     mock_available_exports,
     mock_publish,
     mock_open,
@@ -367,6 +391,63 @@
     mock_publish.assert_called_once()
+
+
+@patch("crewai_cli.tools.main.get_project_name", return_value="sample-tool")
+@patch("crewai_cli.tools.main.get_project_version", return_value="1.0.0")
+@patch("crewai_cli.tools.main.get_project_description", return_value="A sample tool")
+@patch("crewai_cli.tools.main.subprocess.run")
+@patch("crewai_cli.tools.main.os.listdir", return_value=["sample-tool-1.0.0.tar.gz"])
+@patch(
+    "crewai_cli.tools.main.open",
+    new_callable=unittest.mock.mock_open,
+    read_data=b"sample tarball content",
+)
+@patch("crewai_cli.plus_api.PlusAPI.publish_tool")
+@patch("crewai_cli.tools.main.git.Repository.is_synced", return_value=True)
+@patch(
+    "crewai_cli.tools.main.extract_available_exports",
+    return_value=[{"name": "SampleTool"}],
+)
+@patch(
+    "crewai_cli.tools.main.extract_tools_metadata",
+    side_effect=Exception("Failed to extract metadata"),
+)
+def test_publish_metadata_extraction_failure_continues_with_warning(
+    mock_tools_metadata,
+    mock_available_exports,
+    mock_is_synced,
+    mock_publish,
+    mock_open,
+    mock_listdir,
+    mock_subprocess_run,
+    mock_get_project_description,
+    mock_get_project_version,
+    mock_get_project_name,
+    capsys,
+    tool_command,
+):
+    """Test that metadata extraction failure shows a warning but continues publishing."""
+    mock_publish_response = MagicMock()
+    mock_publish_response.status_code = 200
+    mock_publish_response.json.return_value = {"handle": "sample-tool"}
+    mock_publish.return_value = mock_publish_response
+    tool_command.publish(is_public=True)
+    output = capsys.readouterr().out
+    assert "Warning: Could not extract tool metadata" in output
+    assert "Publishing will continue without detailed metadata" in output
+    assert "No tool metadata extracted" in output
+    mock_publish.assert_called_once_with(
+        handle="sample-tool",
+        is_public=True,
+        version="1.0.0",
+        description="A sample tool",
+        encoded_file=unittest.mock.ANY,
+        available_exports=[{"name": "SampleTool"}],
+        tools_metadata=[],
+    )
+
+
 @patch("crewai_cli.tools.main.Settings")
 def test_print_current_organization_with_org(mock_settings, capsys, tool_command):
     mock_settings_instance = MagicMock()