mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-05-07 10:12:38 +00:00
refactor: extract CLI into standalone crewai-cli package
This commit is contained in:
1
lib/cli/tests/deploy/__init__.py
Normal file
1
lib/cli/tests/deploy/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Tests for CLI deploy."""
|
||||
271
lib/cli/tests/deploy/test_deploy_main.py
Normal file
271
lib/cli/tests/deploy/test_deploy_main.py
Normal file
@@ -0,0 +1,271 @@
|
||||
import sys
|
||||
import unittest
|
||||
from io import StringIO
|
||||
from unittest.mock import MagicMock, Mock, patch
|
||||
|
||||
import pytest
|
||||
import json
|
||||
|
||||
import httpx
|
||||
from crewai_cli.deploy.main import DeployCommand
|
||||
from crewai_cli.utils import parse_toml
|
||||
|
||||
|
||||
class TestDeployCommand(unittest.TestCase):
    """Unit tests for ``crewai_cli.deploy.main.DeployCommand``.

    Auth-token retrieval, project-name resolution, and the Plus API client
    are all patched out, so no network or filesystem access happens.

    Changes vs. the previous revision: a stray debug ``print`` was removed
    from ``test_get_project_name_python_310``, and the repeated
    MagicMock-response setup is factored into ``_make_response``.
    """

    @patch("crewai_cli.command.get_auth_token")
    @patch("crewai_cli.deploy.main.get_project_name")
    @patch("crewai_cli.command.PlusAPI")
    def setUp(
        self,
        mock_plus_api,
        mock_get_project_name,
        mock_get_auth_token,
    ):
        # ``patch`` injects mocks innermost-decorator-first.
        self.mock_get_auth_token = mock_get_auth_token
        self.mock_get_project_name = mock_get_project_name
        self.mock_plus_api = mock_plus_api

        self.mock_get_auth_token.return_value = "test_token"
        self.mock_get_project_name.return_value = "test_project"

        self.deploy_command = DeployCommand()
        self.mock_client = self.deploy_command.plus_api_client

    @staticmethod
    def _make_response(status_code, json_value=None):
        """Build a MagicMock HTTP response with the given status code and,
        optionally, a JSON payload returned by ``.json()``."""
        response = MagicMock()
        response.status_code = status_code
        if json_value is not None:
            response.json.return_value = json_value
        return response

    def test_init_success(self):
        self.assertEqual(self.deploy_command.project_name, "test_project")
        self.mock_plus_api.assert_called_once_with(api_key="test_token")

    @patch("crewai_cli.command.get_auth_token")
    def test_init_failure(self, mock_get_auth_token):
        mock_get_auth_token.side_effect = Exception("Auth failed")

        with self.assertRaises(SystemExit):
            DeployCommand()

    def test_validate_response_successful_response(self):
        mock_response = Mock(spec=httpx.Response)
        mock_response.json.return_value = {"message": "Success"}
        mock_response.status_code = 200
        mock_response.is_success = True

        with patch("sys.stdout", new=StringIO()) as fake_out:
            self.deploy_command._validate_response(mock_response)
            # A successful response must print nothing.
            assert fake_out.getvalue() == ""

    def test_validate_response_json_decode_error(self):
        mock_response = Mock(spec=httpx.Response)
        mock_response.json.side_effect = json.JSONDecodeError("Decode error", "", 0)
        mock_response.status_code = 500
        mock_response.content = b"Invalid JSON"

        with patch("sys.stdout", new=StringIO()) as fake_out:
            with pytest.raises(SystemExit):
                self.deploy_command._validate_response(mock_response)
            output = fake_out.getvalue()
            assert (
                "Failed to parse response from Enterprise API failed. Details:"
                in output
            )
            assert "Status Code: 500" in output
            assert "Response:\nInvalid JSON" in output

    def test_validate_response_422_error(self):
        mock_response = Mock(spec=httpx.Response)
        mock_response.json.return_value = {
            "field1": ["Error message 1"],
            "field2": ["Error message 2"],
        }
        mock_response.status_code = 422
        mock_response.is_success = False

        with patch("sys.stdout", new=StringIO()) as fake_out:
            with pytest.raises(SystemExit):
                self.deploy_command._validate_response(mock_response)
            output = fake_out.getvalue()
            assert (
                "Failed to complete operation. Please fix the following errors:"
                in output
            )
            assert "Field1 Error message 1" in output
            assert "Field2 Error message 2" in output

    def test_validate_response_other_error(self):
        mock_response = Mock(spec=httpx.Response)
        mock_response.json.return_value = {"error": "Something went wrong"}
        mock_response.status_code = 500
        mock_response.is_success = False

        with patch("sys.stdout", new=StringIO()) as fake_out:
            with pytest.raises(SystemExit):
                self.deploy_command._validate_response(mock_response)
            output = fake_out.getvalue()
            assert "Request to Enterprise API failed. Details:" in output
            assert "Details:\nSomething went wrong" in output

    def test_standard_no_param_error_message(self):
        with patch("sys.stdout", new=StringIO()) as fake_out:
            self.deploy_command._standard_no_param_error_message()
            self.assertIn("No UUID provided", fake_out.getvalue())

    def test_display_deployment_info(self):
        with patch("sys.stdout", new=StringIO()) as fake_out:
            self.deploy_command._display_deployment_info(
                {"uuid": "test-uuid", "status": "deployed"}
            )
            self.assertIn("Deploying the crew...", fake_out.getvalue())
            self.assertIn("test-uuid", fake_out.getvalue())
            self.assertIn("deployed", fake_out.getvalue())

    def test_display_logs(self):
        with patch("sys.stdout", new=StringIO()) as fake_out:
            self.deploy_command._display_logs(
                [{"timestamp": "2023-01-01", "level": "INFO", "message": "Test log"}]
            )
            self.assertIn("2023-01-01 - INFO: Test log", fake_out.getvalue())

    @patch("crewai_cli.deploy.main.DeployCommand._display_deployment_info")
    def test_deploy_with_uuid(self, mock_display):
        self.mock_client.deploy_by_uuid.return_value = self._make_response(
            200, {"uuid": "test-uuid"}
        )

        self.deploy_command.deploy(uuid="test-uuid", skip_validate=True)

        self.mock_client.deploy_by_uuid.assert_called_once_with("test-uuid")
        mock_display.assert_called_once_with({"uuid": "test-uuid"})

    @patch("crewai_cli.deploy.main.DeployCommand._display_deployment_info")
    def test_deploy_with_project_name(self, mock_display):
        self.mock_client.deploy_by_name.return_value = self._make_response(
            200, {"uuid": "test-uuid"}
        )

        self.deploy_command.deploy(skip_validate=True)

        self.mock_client.deploy_by_name.assert_called_once_with("test_project")
        mock_display.assert_called_once_with({"uuid": "test-uuid"})

    @patch("crewai_cli.deploy.main.fetch_and_json_env_file")
    @patch("crewai_cli.deploy.main.git.Repository.origin_url")
    @patch("builtins.input")
    def test_create_crew(self, mock_input, mock_git_origin_url, mock_fetch_env):
        mock_fetch_env.return_value = {"ENV_VAR": "value"}
        mock_git_origin_url.return_value = "https://github.com/test/repo.git"
        mock_input.return_value = ""

        self.mock_client.create_crew.return_value = self._make_response(
            201, {"uuid": "new-uuid", "status": "created"}
        )

        with patch("sys.stdout", new=StringIO()) as fake_out:
            self.deploy_command.create_crew(skip_validate=True)
            self.assertIn("Deployment created successfully!", fake_out.getvalue())
            self.assertIn("new-uuid", fake_out.getvalue())

    def test_list_crews(self):
        self.mock_client.list_crews.return_value = self._make_response(
            200,
            [
                {"name": "Crew1", "uuid": "uuid1", "status": "active"},
                {"name": "Crew2", "uuid": "uuid2", "status": "inactive"},
            ],
        )

        with patch("sys.stdout", new=StringIO()) as fake_out:
            self.deploy_command.list_crews()
            self.assertIn("Crew1 (uuid1) active", fake_out.getvalue())
            self.assertIn("Crew2 (uuid2) inactive", fake_out.getvalue())

    def test_get_crew_status(self):
        self.mock_client.crew_status_by_name.return_value = self._make_response(
            200, {"name": "InternalCrew", "status": "active"}
        )

        with patch("sys.stdout", new=StringIO()) as fake_out:
            self.deploy_command.get_crew_status()
            self.assertIn("InternalCrew", fake_out.getvalue())
            self.assertIn("active", fake_out.getvalue())

    def test_get_crew_logs(self):
        self.mock_client.crew_by_name.return_value = self._make_response(
            200,
            [
                {"timestamp": "2023-01-01", "level": "INFO", "message": "Log1"},
                {"timestamp": "2023-01-02", "level": "ERROR", "message": "Log2"},
            ],
        )

        with patch("sys.stdout", new=StringIO()) as fake_out:
            self.deploy_command.get_crew_logs(None)
            self.assertIn("2023-01-01 - INFO: Log1", fake_out.getvalue())
            self.assertIn("2023-01-02 - ERROR: Log2", fake_out.getvalue())

    def test_remove_crew(self):
        self.mock_client.delete_crew_by_name.return_value = self._make_response(204)

        with patch("sys.stdout", new=StringIO()) as fake_out:
            self.deploy_command.remove_crew(None)
            self.assertIn(
                "Crew 'test_project' removed successfully", fake_out.getvalue()
            )

    @unittest.skipIf(sys.version_info < (3, 11), "Requires Python 3.11+")
    def test_parse_toml_python_311_plus(self):
        toml_content = """
[tool.poetry]
name = "test_project"
version = "0.1.0"

[tool.poetry.dependencies]
python = "^3.11"
crewai = { extras = ["tools"], version = ">=0.51.0,<1.0.0" }
"""
        parsed = parse_toml(toml_content)
        self.assertEqual(parsed["tool"]["poetry"]["name"], "test_project")

    @patch(
        "builtins.open",
        new_callable=unittest.mock.mock_open,
        read_data="""
[project]
name = "test_project"
version = "0.1.0"
requires-python = ">=3.10,<3.14"
dependencies = ["crewai"]
""",
    )
    def test_get_project_name_python_310(self, mock_open):
        from crewai_cli.utils import get_project_name

        # Debug print removed; the assertion alone covers the behavior.
        self.assertEqual(get_project_name(), "test_project")

    @unittest.skipIf(sys.version_info < (3, 11), "Requires Python 3.11+")
    @patch(
        "builtins.open",
        new_callable=unittest.mock.mock_open,
        read_data="""
[project]
name = "test_project"
version = "0.1.0"
requires-python = ">=3.10,<3.14"
dependencies = ["crewai"]
""",
    )
    def test_get_project_name_python_311_plus(self, mock_open):
        from crewai_cli.utils import get_project_name

        self.assertEqual(get_project_name(), "test_project")

    def test_get_crewai_version(self):
        from crewai_cli.version import get_crewai_version

        assert isinstance(get_crewai_version(), str)
|
||||
430
lib/cli/tests/deploy/test_validate.py
Normal file
430
lib/cli/tests/deploy/test_validate.py
Normal file
@@ -0,0 +1,430 @@
|
||||
"""Tests for `crewai.cli.deploy.validate`.
|
||||
|
||||
The fixtures here correspond 1:1 to the deployment-failure patterns observed
|
||||
in the #crewai-deployment-failures Slack channel that motivated this work.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from textwrap import dedent
|
||||
from typing import Iterable
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai_cli.deploy.validate import (
|
||||
DeployValidator,
|
||||
Severity,
|
||||
normalize_package_name,
|
||||
)
|
||||
|
||||
|
||||
def _make_pyproject(
|
||||
name: str = "my_crew",
|
||||
dependencies: Iterable[str] = ("crewai>=1.14.0",),
|
||||
*,
|
||||
hatchling: bool = False,
|
||||
flow: bool = False,
|
||||
extra: str = "",
|
||||
) -> str:
|
||||
deps = ", ".join(f'"{d}"' for d in dependencies)
|
||||
lines = [
|
||||
"[project]",
|
||||
f'name = "{name}"',
|
||||
'version = "0.1.0"',
|
||||
f"dependencies = [{deps}]",
|
||||
]
|
||||
if hatchling:
|
||||
lines += [
|
||||
"",
|
||||
"[build-system]",
|
||||
'requires = ["hatchling"]',
|
||||
'build-backend = "hatchling.build"',
|
||||
]
|
||||
if flow:
|
||||
lines += ["", "[tool.crewai]", 'type = "flow"']
|
||||
if extra:
|
||||
lines += ["", extra]
|
||||
return "\n".join(lines) + "\n"
|
||||
|
||||
|
||||
def _scaffold_standard_crew(
    root: Path,
    *,
    name: str = "my_crew",
    include_crew_py: bool = True,
    include_agents_yaml: bool = True,
    include_tasks_yaml: bool = True,
    include_lockfile: bool = True,
    pyproject: str | None = None,
) -> Path:
    """Write a standard crew project layout under *root*.

    Returns the package directory (``src/<normalized name>/``). Each
    ``include_*`` flag lets a test omit one artifact to trigger the
    corresponding validation finding.
    """
    (root / "pyproject.toml").write_text(pyproject or _make_pyproject(name=name))
    if include_lockfile:
        (root / "uv.lock").write_text("# dummy uv lockfile\n")

    package_dir = root / "src" / normalize_package_name(name)
    package_dir.mkdir(parents=True)
    (package_dir / "__init__.py").write_text("")

    if include_crew_py:
        crew_source = (
            dedent(
                """
                from crewai.project import CrewBase, crew

                @CrewBase
                class MyCrew:
                    agents_config = "config/agents.yaml"
                    tasks_config = "config/tasks.yaml"

                    @crew
                    def crew(self):
                        from crewai import Crew
                        return Crew(agents=[], tasks=[])
                """
            ).strip()
            + "\n"
        )
        (package_dir / "crew.py").write_text(crew_source)

    config_dir = package_dir / "config"
    config_dir.mkdir()
    if include_agents_yaml:
        (config_dir / "agents.yaml").write_text("{}\n")
    if include_tasks_yaml:
        (config_dir / "tasks.yaml").write_text("{}\n")

    return package_dir
|
||||
|
||||
|
||||
def _codes(validator: DeployValidator) -> set[str]:
|
||||
return {r.code for r in validator.results}
|
||||
|
||||
|
||||
def _run_without_import_check(root: Path) -> DeployValidator:
    """Run validation with the subprocess-based import check stubbed out.

    The import-error classifier is exercised directly in its own tests
    below, so stubbing it here keeps these tests fast and hermetic.
    """
    with patch.object(DeployValidator, "_check_module_imports", lambda self: None):
        validator = DeployValidator(project_root=root)
        validator.run()
        return validator
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "project_name, expected",
    [
        ("my-crew", "my_crew"),
        ("My Cool-Project", "my_cool_project"),
        ("crew123", "crew123"),
        ("crew.name!with$chars", "crewnamewithchars"),
    ],
)
def test_normalize_package_name(project_name: str, expected: str) -> None:
    """Hyphens/spaces become underscores; other punctuation is stripped."""
    assert normalize_package_name(project_name) == expected
|
||||
|
||||
|
||||
def test_valid_standard_crew_project_passes(tmp_path: Path) -> None:
    """A fully scaffolded crew project produces no findings at all."""
    _scaffold_standard_crew(tmp_path)
    validator = _run_without_import_check(tmp_path)
    assert validator.ok, f"expected clean run, got {validator.results}"


def test_missing_pyproject_errors(tmp_path: Path) -> None:
    validator = _run_without_import_check(tmp_path)
    assert "missing_pyproject" in _codes(validator)
    assert not validator.ok


def test_invalid_pyproject_errors(tmp_path: Path) -> None:
    (tmp_path / "pyproject.toml").write_text("this is not valid toml ====\n")
    validator = _run_without_import_check(tmp_path)
    assert "invalid_pyproject" in _codes(validator)


def test_missing_project_name_errors(tmp_path: Path) -> None:
    (tmp_path / "pyproject.toml").write_text(
        '[project]\nversion = "0.1.0"\ndependencies = ["crewai>=1.14.0"]\n'
    )
    validator = _run_without_import_check(tmp_path)
    assert "missing_project_name" in _codes(validator)


def test_missing_lockfile_errors(tmp_path: Path) -> None:
    _scaffold_standard_crew(tmp_path, include_lockfile=False)
    validator = _run_without_import_check(tmp_path)
    assert "missing_lockfile" in _codes(validator)


def test_poetry_lock_is_accepted(tmp_path: Path) -> None:
    _scaffold_standard_crew(tmp_path, include_lockfile=False)
    (tmp_path / "poetry.lock").write_text("# poetry lockfile\n")
    validator = _run_without_import_check(tmp_path)
    assert "missing_lockfile" not in _codes(validator)


def test_stale_lockfile_warns(tmp_path: Path) -> None:
    import os

    _scaffold_standard_crew(tmp_path)
    # Backdate the lockfile so it is older than pyproject.toml.
    lockfile = tmp_path / "uv.lock"
    stale_mtime = (tmp_path / "pyproject.toml").stat().st_mtime - 60
    os.utime(lockfile, (stale_mtime, stale_mtime))
    validator = _run_without_import_check(tmp_path)
    assert "stale_lockfile" in _codes(validator)
    # Staleness is only a warning, so the overall run can still be ok.
    assert validator.ok
|
||||
|
||||
|
||||
def test_missing_package_dir_errors(tmp_path: Path) -> None:
    # pyproject declares name=my_crew, but only src/other_pkg/ exists.
    (tmp_path / "pyproject.toml").write_text(_make_pyproject(name="my_crew"))
    (tmp_path / "uv.lock").write_text("")
    (tmp_path / "src" / "other_pkg").mkdir(parents=True)
    validator = _run_without_import_check(tmp_path)
    assert "missing_package_dir" in _codes(validator)
    finding = next(r for r in validator.results if r.code == "missing_package_dir")
    # The hint should point at the directory that actually exists.
    assert "other_pkg" in finding.hint


def test_egg_info_only_errors_with_targeted_hint(tmp_path: Path) -> None:
    """Regression for the case where only src/<name>.egg-info/ exists."""
    (tmp_path / "pyproject.toml").write_text(_make_pyproject(name="odoo_pm_agents"))
    (tmp_path / "uv.lock").write_text("")
    (tmp_path / "src" / "odoo_pm_agents.egg-info").mkdir(parents=True)
    validator = _run_without_import_check(tmp_path)
    finding = next(r for r in validator.results if r.code == "missing_package_dir")
    assert "egg-info" in finding.hint


def test_stale_egg_info_sibling_warns(tmp_path: Path) -> None:
    _scaffold_standard_crew(tmp_path)
    (tmp_path / "src" / "my_crew.egg-info").mkdir()
    validator = _run_without_import_check(tmp_path)
    assert "stale_egg_info" in _codes(validator)


def test_missing_crew_py_errors(tmp_path: Path) -> None:
    _scaffold_standard_crew(tmp_path, include_crew_py=False)
    validator = _run_without_import_check(tmp_path)
    assert "missing_crew_py" in _codes(validator)


def test_missing_agents_yaml_errors(tmp_path: Path) -> None:
    _scaffold_standard_crew(tmp_path, include_agents_yaml=False)
    validator = _run_without_import_check(tmp_path)
    assert "missing_agents_yaml" in _codes(validator)


def test_missing_tasks_yaml_errors(tmp_path: Path) -> None:
    _scaffold_standard_crew(tmp_path, include_tasks_yaml=False)
    validator = _run_without_import_check(tmp_path)
    assert "missing_tasks_yaml" in _codes(validator)
|
||||
|
||||
|
||||
def test_flow_project_requires_main_py(tmp_path: Path) -> None:
    (tmp_path / "pyproject.toml").write_text(_make_pyproject(name="my_flow", flow=True))
    (tmp_path / "uv.lock").write_text("")
    (tmp_path / "src" / "my_flow").mkdir(parents=True)
    validator = _run_without_import_check(tmp_path)
    assert "missing_flow_main" in _codes(validator)


def test_flow_project_with_main_py_passes(tmp_path: Path) -> None:
    (tmp_path / "pyproject.toml").write_text(_make_pyproject(name="my_flow", flow=True))
    (tmp_path / "uv.lock").write_text("")
    flow_pkg = tmp_path / "src" / "my_flow"
    flow_pkg.mkdir(parents=True)
    (flow_pkg / "main.py").write_text("# flow entrypoint\n")
    validator = _run_without_import_check(tmp_path)
    assert "missing_flow_main" not in _codes(validator)


def test_hatchling_without_wheel_config_passes_when_pkg_dir_matches(
    tmp_path: Path,
) -> None:
    _scaffold_standard_crew(
        tmp_path, pyproject=_make_pyproject(name="my_crew", hatchling=True)
    )
    validator = _run_without_import_check(tmp_path)
    # src/my_crew/ exists, so hatch default should find it — no wheel error.
    assert "hatch_wheel_target_missing" not in _codes(validator)


def test_hatchling_with_explicit_wheel_config_passes(tmp_path: Path) -> None:
    wheel_section = (
        "[tool.hatch.build.targets.wheel]\n"
        'packages = ["src/my_crew"]'
    )
    _scaffold_standard_crew(
        tmp_path,
        pyproject=_make_pyproject(name="my_crew", hatchling=True, extra=wheel_section),
    )
    validator = _run_without_import_check(tmp_path)
    assert "hatch_wheel_target_missing" not in _codes(validator)
|
||||
|
||||
|
||||
def test_classify_missing_openai_key_is_warning(tmp_path: Path) -> None:
    validator = DeployValidator(project_root=tmp_path)
    validator._classify_import_error(
        "ImportError",
        "Error importing native provider: 1 validation error for OpenAICompletion\n"
        " Value error, OPENAI_API_KEY is required",
        tb="",
    )
    assert len(validator.results) == 1
    finding = validator.results[0]
    assert finding.code == "llm_init_missing_key"
    assert finding.severity is Severity.WARNING
    assert "OPENAI_API_KEY" in finding.title


def test_classify_azure_extra_missing_is_error(tmp_path: Path) -> None:
    """The real message raised by the Azure provider module uses plain
    double quotes around the install command (no backticks). Match the
    exact string that ships in the provider source so this test actually
    guards the regex used in production."""
    validator = DeployValidator(project_root=tmp_path)
    validator._classify_import_error(
        "ImportError",
        'Azure AI Inference native provider not available, to install: uv add "crewai[azure-ai-inference]"',
        tb="",
    )
    assert "missing_provider_extra" in _codes(validator)
    finding = next(r for r in validator.results if r.code == "missing_provider_extra")
    assert finding.title.startswith("Azure AI Inference")
    assert 'uv add "crewai[azure-ai-inference]"' in finding.hint


@pytest.mark.parametrize(
    "pkg_label, install_cmd",
    [
        ("Anthropic", 'uv add "crewai[anthropic]"'),
        ("AWS Bedrock", 'uv add "crewai[bedrock]"'),
        ("Google Gen AI", 'uv add "crewai[google-genai]"'),
    ],
)
def test_classify_missing_provider_extra_matches_real_messages(
    tmp_path: Path, pkg_label: str, install_cmd: str
) -> None:
    """Regression for the provider error strings, matched verbatim."""
    validator = DeployValidator(project_root=tmp_path)
    validator._classify_import_error(
        "ImportError",
        f"{pkg_label} native provider not available, to install: {install_cmd}",
        tb="",
    )
    assert "missing_provider_extra" in _codes(validator)
    finding = next(r for r in validator.results if r.code == "missing_provider_extra")
    assert install_cmd in finding.hint
|
||||
|
||||
|
||||
def test_classify_keyerror_at_import_is_warning(tmp_path: Path) -> None:
    """Regression for `KeyError: 'SERPLY_API_KEY'` raised at import time."""
    validator = DeployValidator(project_root=tmp_path)
    validator._classify_import_error("KeyError", "'SERPLY_API_KEY'", tb="")
    assert "env_var_read_at_import" in _codes(validator)


def test_classify_no_crewbase_class_is_error(tmp_path: Path) -> None:
    validator = DeployValidator(project_root=tmp_path)
    validator._classify_import_error(
        "ValueError",
        "Crew class annotated with @CrewBase not found.",
        tb="",
    )
    assert "no_crewbase_class" in _codes(validator)


def test_classify_no_flow_subclass_is_error(tmp_path: Path) -> None:
    validator = DeployValidator(project_root=tmp_path)
    validator._classify_import_error(
        "ValueError", "No Flow subclass found in the module.", tb=""
    )
    assert "no_flow_subclass" in _codes(validator)


def test_classify_stale_crewai_pin_attribute_error(tmp_path: Path) -> None:
    """Regression for a stale crewai pin missing `_load_response_format`."""
    validator = DeployValidator(project_root=tmp_path)
    validator._classify_import_error(
        "AttributeError",
        "'EmploymentServiceDecisionSupportSystemCrew' object has no attribute '_load_response_format'",
        tb="",
    )
    assert "stale_crewai_pin" in _codes(validator)


def test_classify_unknown_error_is_fallback(tmp_path: Path) -> None:
    validator = DeployValidator(project_root=tmp_path)
    validator._classify_import_error("RuntimeError", "something weird happened", tb="")
    assert "import_failed" in _codes(validator)
|
||||
|
||||
|
||||
def test_env_var_referenced_but_missing_warns(tmp_path: Path) -> None:
    import os

    pkg_dir = _scaffold_standard_crew(tmp_path)
    (pkg_dir / "tools.py").write_text(
        'import os\nkey = os.getenv("TAVILY_API_KEY")\n'
    )
    # Make sure the test doesn't inherit the key from the host environment.
    with patch.dict(os.environ, {}, clear=False):
        os.environ.pop("TAVILY_API_KEY", None)
        validator = _run_without_import_check(tmp_path)
        assert "env_vars_not_in_dotenv" in _codes(validator)


def test_env_var_in_dotenv_does_not_warn(tmp_path: Path) -> None:
    pkg_dir = _scaffold_standard_crew(tmp_path)
    (pkg_dir / "tools.py").write_text(
        'import os\nkey = os.getenv("TAVILY_API_KEY")\n'
    )
    (tmp_path / ".env").write_text("TAVILY_API_KEY=abc\n")
    validator = _run_without_import_check(tmp_path)
    assert "env_vars_not_in_dotenv" not in _codes(validator)


def test_old_crewai_pin_in_uv_lock_warns(tmp_path: Path) -> None:
    _scaffold_standard_crew(tmp_path)
    (tmp_path / "uv.lock").write_text(
        'name = "crewai"\nversion = "1.10.0"\nsource = { registry = "..." }\n'
    )
    validator = _run_without_import_check(tmp_path)
    assert "old_crewai_pin" in _codes(validator)


def test_modern_crewai_pin_does_not_warn(tmp_path: Path) -> None:
    _scaffold_standard_crew(tmp_path)
    (tmp_path / "uv.lock").write_text(
        'name = "crewai"\nversion = "1.14.1"\nsource = { registry = "..." }\n'
    )
    validator = _run_without_import_check(tmp_path)
    assert "old_crewai_pin" not in _codes(validator)
|
||||
|
||||
|
||||
def test_create_crew_aborts_on_validation_error(tmp_path: Path) -> None:
    """`crewai deploy create` must not contact the API when validation fails."""
    from unittest.mock import MagicMock, patch as mock_patch

    from crewai_cli.deploy.main import DeployCommand

    with (
        mock_patch("crewai_cli.command.get_auth_token", return_value="tok"),
        mock_patch("crewai_cli.deploy.main.get_project_name", return_value="p"),
        mock_patch("crewai_cli.command.PlusAPI") as mock_api,
        mock_patch("crewai_cli.deploy.main.validate_project") as mock_validate,
    ):
        mock_validate.return_value = MagicMock(ok=False)
        command = DeployCommand()
        command.create_crew()
        # Validation failed, so the API client must never have been hit.
        assert not command.plus_api_client.create_crew.called
        del mock_api  # silence unused-var lint
|
||||
Reference in New Issue
Block a user