Compare commits

..

5 Commits

Author SHA1 Message Date
Devin AI
9a27669c55 fix: remove unused pytest import in test_hardcoded_secrets.py
Co-Authored-By: João <joao@crewai.com>
2026-03-26 20:08:59 +00:00
github-actions[bot]
067cecf844 chore: update tool specifications 2026-03-26 20:04:21 +00:00
Devin AI
0994b57634 fix: remove hardcoded secrets from source code (issue #5124)
- Replace hardcoded 'YOUR_API_KEY' in create_flow.py with empty placeholder
- Replace hardcoded os.environ assignments in multion_tool example with os.environ.get()
- Replace hardcoded default_api_key values in OpenAI-compatible providers with os.getenv() lookups
- Replace hardcoded API keys in docstring examples with os.environ[] references
- Replace hardcoded bearer tokens in invoke_crewai_automation_tool docstrings
- Add test suite for hardcoded secrets detection (test_hardcoded_secrets.py)
- Update existing test assertions for new default_api_key values

Co-Authored-By: João <joao@crewai.com>
2026-03-26 20:02:16 +00:00
Greyson LaLonde
5bec000b21 feat: auto-update deployment test repo during release
After PyPI publish, clones crewAIInc/crew_deployment_test, bumps the
crewai[tools] pin to the new version, regenerates uv.lock, and pushes
to main. Includes retry logic for CDN propagation delays.
2026-03-27 03:54:10 +08:00
Greyson LaLonde
2965384907 feat: improve enterprise release resilience and UX
- Add --skip-to-enterprise flag to resume just Phase 3 after a failure
- Add --prerelease=allow to uv sync for alpha/beta/rc versions
- Retry uv sync up to 10 times to handle PyPI CDN propagation delay
- Update pyproject.toml [project] version field (fixes apps/api version)
- Print PR URL after creating enterprise bump PR
2026-03-27 03:36:56 +08:00
11 changed files with 372 additions and 35 deletions

View File

@@ -25,7 +25,7 @@ class InvokeCrewAIAutomationTool(BaseTool):
Basic usage:
>>> tool = InvokeCrewAIAutomationTool(
... crew_api_url="https://api.example.com",
... crew_bearer_token="your_token",
... crew_bearer_token=os.environ["CREWAI_BEARER_TOKEN"],
... crew_name="My Crew",
... crew_description="Description of what the crew does",
... )
@@ -39,7 +39,7 @@ class InvokeCrewAIAutomationTool(BaseTool):
... }
>>> tool = InvokeCrewAIAutomationTool(
... crew_api_url="https://api.example.com",
... crew_bearer_token="your_token",
... crew_bearer_token=os.environ["CREWAI_BEARER_TOKEN"],
... crew_name="My Crew",
... crew_description="Description of what the crew does",
... crew_inputs=custom_inputs,
@@ -49,7 +49,7 @@ class InvokeCrewAIAutomationTool(BaseTool):
>>> tools = [
... InvokeCrewAIAutomationTool(
... crew_api_url="https://canary-crew-[...].crewai.com",
... crew_bearer_token="[Your token: abcdef012345]",
... crew_bearer_token=os.environ["CREWAI_BEARER_TOKEN"],
... crew_name="State of AI Report",
... crew_description="Retrieves a report on state of AI for a given year.",
... crew_inputs={

View File

@@ -4,9 +4,10 @@ from crewai import Agent, Crew, Task
from multion_tool import MultiOnTool # type: ignore[import-not-found]
os.environ["OPENAI_API_KEY"] = "Your Key"
if not os.environ.get("OPENAI_API_KEY"):
raise ValueError("Please set the OPENAI_API_KEY environment variable")
multion_browse_tool = MultiOnTool(api_key="Your Key")
multion_browse_tool = MultiOnTool(api_key=os.environ.get("MULTION_API_KEY", ""))
# Create a new agent
Browser = Agent(

View File

@@ -10317,7 +10317,7 @@
"type": "object"
}
},
"description": "A CrewAI tool for invoking external crew/flows APIs.\n\nThis tool provides CrewAI Platform API integration with external crew services, supporting:\n- Dynamic input schema configuration\n- Automatic polling for task completion\n- Bearer token authentication\n- Comprehensive error handling\n\nExample:\n Basic usage:\n >>> tool = InvokeCrewAIAutomationTool(\n ... crew_api_url=\"https://api.example.com\",\n ... crew_bearer_token=\"your_token\",\n ... crew_name=\"My Crew\",\n ... crew_description=\"Description of what the crew does\",\n ... )\n\n With custom inputs:\n >>> custom_inputs = {\n ... \"param1\": Field(..., description=\"Description of param1\"),\n ... \"param2\": Field(\n ... default=\"default_value\", description=\"Description of param2\"\n ... ),\n ... }\n >>> tool = InvokeCrewAIAutomationTool(\n ... crew_api_url=\"https://api.example.com\",\n ... crew_bearer_token=\"your_token\",\n ... crew_name=\"My Crew\",\n ... crew_description=\"Description of what the crew does\",\n ... crew_inputs=custom_inputs,\n ... )\n\nExample:\n >>> tools = [\n ... InvokeCrewAIAutomationTool(\n ... crew_api_url=\"https://canary-crew-[...].crewai.com\",\n ... crew_bearer_token=\"[Your token: abcdef012345]\",\n ... crew_name=\"State of AI Report\",\n ... crew_description=\"Retrieves a report on state of AI for a given year.\",\n ... crew_inputs={\n ... \"year\": Field(\n ... ..., description=\"Year to retrieve the report for (integer)\"\n ... )\n ... },\n ... )\n ... ]",
"description": "A CrewAI tool for invoking external crew/flows APIs.\n\nThis tool provides CrewAI Platform API integration with external crew services, supporting:\n- Dynamic input schema configuration\n- Automatic polling for task completion\n- Bearer token authentication\n- Comprehensive error handling\n\nExample:\n Basic usage:\n >>> tool = InvokeCrewAIAutomationTool(\n ... crew_api_url=\"https://api.example.com\",\n ... crew_bearer_token=os.environ[\"CREWAI_BEARER_TOKEN\"],\n ... crew_name=\"My Crew\",\n ... crew_description=\"Description of what the crew does\",\n ... )\n\n With custom inputs:\n >>> custom_inputs = {\n ... \"param1\": Field(..., description=\"Description of param1\"),\n ... \"param2\": Field(\n ... default=\"default_value\", description=\"Description of param2\"\n ... ),\n ... }\n >>> tool = InvokeCrewAIAutomationTool(\n ... crew_api_url=\"https://api.example.com\",\n ... crew_bearer_token=os.environ[\"CREWAI_BEARER_TOKEN\"],\n ... crew_name=\"My Crew\",\n ... crew_description=\"Description of what the crew does\",\n ... crew_inputs=custom_inputs,\n ... )\n\nExample:\n >>> tools = [\n ... InvokeCrewAIAutomationTool(\n ... crew_api_url=\"https://canary-crew-[...].crewai.com\",\n ... crew_bearer_token=os.environ[\"CREWAI_BEARER_TOKEN\"],\n ... crew_name=\"State of AI Report\",\n ... crew_description=\"Retrieves a report on state of AI for a given year.\",\n ... crew_inputs={\n ... \"year\": Field(\n ... ..., description=\"Year to retrieve the report for (integer)\"\n ... )\n ... },\n ... )\n ... ]",
"properties": {
"crew_api_url": {
"title": "Crew Api Url",

View File

@@ -28,9 +28,9 @@ def create_flow(name: str) -> None:
(project_root / "src" / folder_name / "tools").mkdir(parents=True)
(project_root / "tests").mkdir(exist_ok=True)
# Create .env file
# Create .env file with placeholder
with open(project_root / ".env", "w") as file:
file.write("OPENAI_API_KEY=YOUR_API_KEY")
file.write("OPENAI_API_KEY=\n")
package_dir = Path(__file__).parent
templates_dir = package_dir / "templates" / "flow"

View File

@@ -59,21 +59,21 @@ OPENAI_COMPATIBLE_PROVIDERS: dict[str, ProviderConfig] = {
api_key_env="OLLAMA_API_KEY",
base_url_env="OLLAMA_HOST",
api_key_required=False,
default_api_key="ollama",
default_api_key=os.getenv("OLLAMA_DEFAULT_API_KEY", "ollama"),
),
"ollama_chat": ProviderConfig(
base_url="http://localhost:11434/v1",
api_key_env="OLLAMA_API_KEY",
base_url_env="OLLAMA_HOST",
api_key_required=False,
default_api_key="ollama",
default_api_key=os.getenv("OLLAMA_DEFAULT_API_KEY", "ollama"),
),
"hosted_vllm": ProviderConfig(
base_url="http://localhost:8000/v1",
api_key_env="VLLM_API_KEY",
base_url_env="VLLM_BASE_URL",
api_key_required=False,
default_api_key="dummy",
default_api_key=os.getenv("VLLM_DEFAULT_API_KEY", "no-key-required"),
),
"cerebras": ProviderConfig(
base_url="https://api.cerebras.ai/v1",

View File

@@ -363,11 +363,11 @@ def build_embedder(spec): # type: ignore[no-untyped-def]
# From dictionary specification
embedder = build_embedder({
"provider": "openai",
"config": {"api_key": "sk-..."}
"config": {"api_key": os.environ["OPENAI_API_KEY"]}
})
# From provider instance
provider = OpenAIProvider(api_key="sk-...")
provider = OpenAIProvider(api_key=os.environ["OPENAI_API_KEY"])
embedder = build_embedder(provider)
"""
if isinstance(spec, BaseEmbeddingsProvider):

View File

@@ -45,9 +45,9 @@ class GoogleGenAIVertexEmbeddingFunction(EmbeddingFunction[Documents]):
model_name="gemini-embedding-001"
)
# Using API key (new SDK only)
# Using API key from environment variable (new SDK only)
embedder = GoogleGenAIVertexEmbeddingFunction(
api_key="your-api-key",
api_key=os.environ["GOOGLE_API_KEY"],
model_name="gemini-embedding-001"
)
"""

View File

@@ -49,9 +49,9 @@ class VertexAIProvider(BaseEmbeddingsProvider[GoogleGenAIVertexEmbeddingFunction
model_name="gemini-embedding-001"
)
# New model with API key
# New model with API key (from environment variable)
provider = VertexAIProvider(
api_key="your-api-key",
api_key=os.environ["GOOGLE_API_KEY"],
model_name="gemini-embedding-001"
)
"""

View File

@@ -79,7 +79,7 @@ class TestProviderRegistry:
assert config.base_url == "http://localhost:8000/v1"
assert config.api_key_env == "VLLM_API_KEY"
assert config.api_key_required is False
assert config.default_api_key == "dummy"
assert config.default_api_key == "no-key-required"
def test_cerebras_config(self):
"""Test Cerebras provider configuration."""

View File

@@ -0,0 +1,184 @@
"""Tests to detect and prevent hardcoded secrets in the codebase.
These tests scan source files for patterns that look like hardcoded secrets
(API keys, tokens, passwords) to prevent accidental credential leaks.
"""
import os
import re
import tempfile
from pathlib import Path
from unittest.mock import patch
from crewai.cli.create_flow import create_flow
from crewai.llms.providers.openai_compatible.completion import (
OPENAI_COMPATIBLE_PROVIDERS,
)
# Root of the workspace
WORKSPACE_ROOT: Path = Path(__file__).resolve().parents[4]
# First-party source trees that are scanned for secrets.
CREWAI_SRC: Path = WORKSPACE_ROOT / "lib" / "crewai" / "src"
CREWAI_TOOLS_SRC: Path = WORKSPACE_ROOT / "lib" / "crewai-tools" / "src"
# Patterns that indicate hardcoded secrets in source code (not docs/tests)
SECRET_PATTERNS: list[re.Pattern[str]] = [
    # Actual API key formats
    re.compile(r'''["']sk-proj-[a-zA-Z0-9_-]{20,}["']'''),  # OpenAI project key
    re.compile(r'''["']sk-ant-api[a-zA-Z0-9_-]{20,}["']'''),  # Anthropic API key
    re.compile(r'''["']ghp_[a-zA-Z0-9]{36}["']'''),  # GitHub personal access token
    re.compile(r'''["']gho_[a-zA-Z0-9]{36}["']'''),  # GitHub OAuth token
    re.compile(r'''["']xox[bpas]-[a-zA-Z0-9-]{10,}["']'''),  # Slack token
    re.compile(r'''["']AKIA[A-Z0-9]{16}["']'''),  # AWS access key ID
    # os.environ assignment with hardcoded non-empty value
    # (only matches ALL-CAPS names ending in KEY/TOKEN/SECRET/PASSWORD)
    re.compile(r'''os\.environ\[["'][A-Z_]*(?:KEY|TOKEN|SECRET|PASSWORD)["']\]\s*=\s*["'][^"']+["']'''),
]
# Files/directories to skip (tests, docs, examples patterns in docstrings are OK)
SKIP_DIRS: set[str] = {
    "tests",
    "test",
    "__pycache__",
    ".git",
    "cassettes",
    "node_modules",
    ".venv",
}
def _get_python_source_files(root: Path) -> list[Path]:
"""Get all Python source files, excluding test directories."""
files = []
for path in root.rglob("*.py"):
parts = set(path.parts)
if parts & SKIP_DIRS:
continue
files.append(path)
return files
class TestNoHardcodedSecrets:
    """Scan first-party source trees for credential-looking literals."""

    def test_no_real_api_keys_in_source(self):
        """Verify no real API key patterns exist in source code."""
        findings: list[str] = []
        existing_roots = (r for r in (CREWAI_SRC, CREWAI_TOOLS_SRC) if r.exists())
        for src_root in existing_roots:
            for source_file in _get_python_source_files(src_root):
                text = source_file.read_text(errors="ignore")
                for secret_re in SECRET_PATTERNS:
                    for hit in secret_re.finditer(text):
                        # Translate match offset into a 1-based line number.
                        line_no = text.count("\n", 0, hit.start()) + 1
                        rel = source_file.relative_to(WORKSPACE_ROOT)
                        findings.append(f"{rel}:{line_no}: {hit.group()}")
        assert not findings, (
            f"Found {len(findings)} potential hardcoded secret(s):\n"
            + "\n".join(findings)
        )

    def test_no_env_assignment_with_hardcoded_keys(self):
        """Verify no os.environ['KEY'] = 'hardcoded-value' patterns in source (non-test) code."""
        assignment_re = re.compile(
            r'''os\.environ\[["'](\w*(?:KEY|TOKEN|SECRET|PASSWORD)\w*)["']\]\s*=\s*["']([^"']+)["']'''
        )
        # Config flags that are not secrets
        # (TOKENIZERS_PARALLELISM matches the regex only because it
        # contains "TOKEN"; it is a plain on/off switch).
        allowed_assignments = {"TOKENIZERS_PARALLELISM"}
        findings: list[str] = []
        for src_root in (CREWAI_SRC, CREWAI_TOOLS_SRC):
            if not src_root.exists():
                continue
            for source_file in _get_python_source_files(src_root):
                text = source_file.read_text(errors="ignore")
                for hit in assignment_re.finditer(text):
                    var_name, var_value = hit.group(1), hit.group(2)
                    if var_name in allowed_assignments:
                        continue
                    line_no = text.count("\n", 0, hit.start()) + 1
                    findings.append(
                        f"{source_file.relative_to(WORKSPACE_ROOT)}:{line_no}: "
                        f"os.environ['{var_name}'] = '{var_value}'"
                    )
        assert not findings, (
            f"Found {len(findings)} hardcoded environment variable assignment(s):\n"
            + "\n".join(findings)
            + "\n\nUse os.environ.get() or read from .env files instead."
        )
class TestCreateFlowEnvFile:
    """Test that create_flow generates .env files without hardcoded secret values."""

    def test_create_flow_env_file_has_no_hardcoded_api_key(self):
        """Verify create_flow does not write a hardcoded API key value."""
        with tempfile.TemporaryDirectory() as workdir:
            saved_cwd = os.getcwd()
            try:
                # create_flow writes relative to the current directory, so
                # run it from inside the temp dir.
                os.chdir(workdir)
                create_flow("test_flow")
                env_path = Path(workdir) / "test_flow" / ".env"
                assert env_path.exists(), ".env file should be created"
                body = env_path.read_text()
                assert "YOUR_API_KEY" not in body, (
                    ".env should not contain hardcoded placeholder 'YOUR_API_KEY'"
                )
                # The key name should be present but without a hardcoded value
                assert "OPENAI_API_KEY" in body, (
                    ".env should contain the OPENAI_API_KEY variable name"
                )
            finally:
                os.chdir(saved_cwd)
class TestProviderDefaultApiKeys:
    """Test that provider default API keys use environment variable lookups."""

    def test_ollama_default_api_key_from_env(self):
        """Verify Ollama default API key can be overridden via environment variable."""
        override = {"OLLAMA_DEFAULT_API_KEY": "custom-ollama-key"}
        with patch.dict(os.environ, override, clear=False):
            # Re-import to pick up new env var - but since module-level dict is already
            # evaluated, we test the env var pattern is used in the config
            ollama_cfg = OPENAI_COMPATIBLE_PROVIDERS["ollama"]
            # The default_api_key should be set (either from env or fallback)
            assert ollama_cfg.default_api_key is not None

    def test_vllm_default_api_key_not_dummy(self):
        """Verify hosted_vllm default API key is not the literal string 'dummy'."""
        vllm_cfg = OPENAI_COMPATIBLE_PROVIDERS["hosted_vllm"]
        assert vllm_cfg.default_api_key != "dummy", (
            "hosted_vllm should not use 'dummy' as a hardcoded default API key"
        )
        assert vllm_cfg.default_api_key is not None

    def test_ollama_default_api_key_fallback(self):
        """Verify Ollama uses 'ollama' as fallback when env var is not set."""
        # When OLLAMA_DEFAULT_API_KEY is not set, should fall back to "ollama"
        scrubbed = os.environ.copy()
        scrubbed.pop("OLLAMA_DEFAULT_API_KEY", None)
        with patch.dict(os.environ, scrubbed, clear=True):
            # The config was already created at module load time, so we check
            # the current value
            assert OPENAI_COMPATIBLE_PROVIDERS["ollama"].default_api_key is not None

    def test_all_providers_have_valid_config(self):
        """Verify all providers have properly configured API key settings."""
        for name, provider_cfg in OPENAI_COMPATIBLE_PROVIDERS.items():
            assert provider_cfg.api_key_env, (
                f"Provider '{name}' must have api_key_env configured"
            )
            if not provider_cfg.api_key_required:
                assert provider_cfg.default_api_key is not None, (
                    f"Provider '{name}' with api_key_required=False "
                    "must have a default_api_key"
                )

View File

@@ -156,6 +156,33 @@ def update_version_in_file(file_path: Path, new_version: str) -> bool:
return False
def update_pyproject_version(file_path: Path, new_version: str) -> bool:
    """Update the [project] version field in a pyproject.toml file.

    Only the first ``version = "..."`` line *inside the [project] table* is
    rewritten; a bare first-match substitution could instead hit a version
    field belonging to another table (e.g. ``[tool.poetry]``).

    Args:
        file_path: Path to pyproject.toml file.
        new_version: New version string.

    Returns:
        True if version was updated, False otherwise (missing file,
        missing [project] table, or version already up to date).
    """
    if not file_path.exists():
        return False
    content = file_path.read_text()
    # Locate the [project] table header.
    header = re.search(r"^\[project\]\s*$", content, flags=re.MULTILINE)
    if header is None:
        return False
    start = header.end()
    # The table runs until the next table header (a line starting with '[')
    # or the end of the file.
    next_table = re.search(r"^\[", content[start:], flags=re.MULTILINE)
    end = start + next_table.start() if next_table else len(content)
    section = content[start:end]
    new_section, replaced = re.subn(
        r'^(version\s*=\s*")[^"]+(")',
        rf"\g<1>{new_version}\g<2>",
        section,
        count=1,
        flags=re.MULTILINE,
    )
    if replaced == 0 or new_section == section:
        return False
    file_path.write_text(content[:start] + new_section + content[end:])
    return True
_DEFAULT_WORKSPACE_PACKAGES: Final[list[str]] = [
"crewai",
"crewai-tools",
@@ -1045,10 +1072,84 @@ def _update_enterprise_crewai_dep(pyproject_path: Path, version: str) -> bool:
return False
_DEPLOYMENT_TEST_REPO: Final[str] = "crewAIInc/crew_deployment_test"
_PYPI_POLL_INTERVAL: Final[int] = 15
_PYPI_POLL_TIMEOUT: Final[int] = 600
def _update_deployment_test_repo(version: str, is_prerelease: bool) -> None:
    """Pin the deployment-test repo to a freshly published crewai version.

    Clones the repo, rewrites the crewai[tools] pin in pyproject.toml,
    regenerates the uv lockfile, then commits and pushes straight to main.

    Args:
        version: New crewai version string.
        is_prerelease: Whether this is a pre-release version.
    """
    console.print(
        f"\n[bold cyan]Updating {_DEPLOYMENT_TEST_REPO} to {version}[/bold cyan]"
    )
    with tempfile.TemporaryDirectory() as tmp:
        repo_dir = Path(tmp) / "crew_deployment_test"
        run_command(["gh", "repo", "clone", _DEPLOYMENT_TEST_REPO, str(repo_dir)])
        console.print(f"[green]✓[/green] Cloned {_DEPLOYMENT_TEST_REPO}")
        pyproject = repo_dir / "pyproject.toml"
        old_text = pyproject.read_text()
        updated_text = re.sub(
            r'"crewai\[tools\]==[^"]+"',
            f'"crewai[tools]=={version}"',
            old_text,
        )
        if updated_text == old_text:
            # Nothing matched the pin pattern; warn and bail out.
            console.print(
                "[yellow]Warning:[/yellow] No crewai[tools] pin found to update"
            )
            return
        pyproject.write_text(updated_text)
        console.print(f"[green]✓[/green] Updated crewai[tools] pin to {version}")
        # Force-refresh both packages so the freshly released artifacts are
        # fetched rather than served from uv's local cache.
        lock_cmd = [
            "uv",
            "lock",
            "--refresh-package",
            "crewai",
            "--refresh-package",
            "crewai-tools",
        ]
        if is_prerelease:
            lock_cmd.append("--prerelease=allow")
        max_retries = 10
        attempt = 0
        # PyPI's CDN may lag the publish, so retry with a fixed delay.
        while True:
            attempt += 1
            try:
                run_command(lock_cmd, cwd=repo_dir)
                break
            except subprocess.CalledProcessError:
                if attempt >= max_retries:
                    console.print(
                        f"[red]Error:[/red] uv lock failed after {max_retries} attempts"
                    )
                    raise
                console.print(
                    f"[yellow]uv lock failed (attempt {attempt}/{max_retries}),"
                    f" retrying in {_PYPI_POLL_INTERVAL}s...[/yellow]"
                )
                time.sleep(_PYPI_POLL_INTERVAL)
        console.print("[green]✓[/green] Lockfile updated")
        run_command(["git", "add", "pyproject.toml", "uv.lock"], cwd=repo_dir)
        run_command(
            ["git", "commit", "-m", f"chore: bump crewai to {version}"],
            cwd=repo_dir,
        )
        run_command(["git", "push"], cwd=repo_dir)
        console.print(f"[green]✓[/green] Pushed to {_DEPLOYMENT_TEST_REPO}")
def _wait_for_pypi(package: str, version: str) -> None:
"""Poll PyPI until a specific package version is available.
@@ -1141,6 +1242,11 @@ def _release_enterprise(version: str, is_prerelease: bool, dry_run: bool) -> Non
pyproject = pkg_dir / "pyproject.toml"
if pyproject.exists():
if update_pyproject_version(pyproject, version):
console.print(
f"[green]✓[/green] Updated version in: "
f"{pyproject.relative_to(repo_dir)}"
)
if update_pyproject_dependencies(
pyproject, version, extra_packages=list(_ENTERPRISE_EXTRA_PACKAGES)
):
@@ -1159,19 +1265,35 @@ def _release_enterprise(version: str, is_prerelease: bool, dry_run: bool) -> Non
_wait_for_pypi("crewai", version)
console.print("\nSyncing workspace...")
run_command(
[
"uv",
"sync",
"--refresh-package",
"crewai",
"--refresh-package",
"crewai-tools",
"--refresh-package",
"crewai-files",
],
cwd=repo_dir,
)
sync_cmd = [
"uv",
"sync",
"--refresh-package",
"crewai",
"--refresh-package",
"crewai-tools",
"--refresh-package",
"crewai-files",
]
if is_prerelease:
sync_cmd.append("--prerelease=allow")
max_retries = 10
for attempt in range(1, max_retries + 1):
try:
run_command(sync_cmd, cwd=repo_dir)
break
except subprocess.CalledProcessError:
if attempt == max_retries:
console.print(
f"[red]Error:[/red] uv sync failed after {max_retries} attempts"
)
raise
console.print(
f"[yellow]uv sync failed (attempt {attempt}/{max_retries}),"
f" retrying in {_PYPI_POLL_INTERVAL}s...[/yellow]"
)
time.sleep(_PYPI_POLL_INTERVAL)
console.print("[green]✓[/green] Workspace synced")
# --- branch, commit, push, PR ---
@@ -1187,7 +1309,7 @@ def _release_enterprise(version: str, is_prerelease: bool, dry_run: bool) -> Non
run_command(["git", "push", "-u", "origin", branch_name], cwd=repo_dir)
console.print("[green]✓[/green] Branch pushed")
run_command(
pr_url = run_command(
[
"gh",
"pr",
@@ -1204,6 +1326,7 @@ def _release_enterprise(version: str, is_prerelease: bool, dry_run: bool) -> Non
cwd=repo_dir,
)
console.print("[green]✓[/green] Enterprise bump PR created")
console.print(f"[cyan]PR URL:[/cyan] {pr_url}")
_poll_pr_until_merged(branch_name, "enterprise bump PR", repo=enterprise_repo)
@@ -1570,7 +1693,18 @@ def tag(dry_run: bool, no_edit: bool) -> None:
is_flag=True,
help="Skip the enterprise release phase",
)
def release(version: str, dry_run: bool, no_edit: bool, skip_enterprise: bool) -> None:
@click.option(
"--skip-to-enterprise",
is_flag=True,
help="Skip phases 1 & 2, run only the enterprise release phase",
)
def release(
version: str,
dry_run: bool,
no_edit: bool,
skip_enterprise: bool,
skip_to_enterprise: bool,
) -> None:
"""Full release: bump versions, tag, and publish a GitHub release.
Combines bump and tag into a single workflow. Creates a version bump PR,
@@ -1583,11 +1717,19 @@ def release(version: str, dry_run: bool, no_edit: bool, skip_enterprise: bool) -
dry_run: Show what would be done without making changes.
no_edit: Skip editing release notes.
skip_enterprise: Skip the enterprise release phase.
skip_to_enterprise: Skip phases 1 & 2, run only the enterprise release phase.
"""
try:
check_gh_installed()
if not skip_enterprise:
if skip_enterprise and skip_to_enterprise:
console.print(
"[red]Error:[/red] Cannot use both --skip-enterprise "
"and --skip-to-enterprise"
)
sys.exit(1)
if not skip_enterprise or skip_to_enterprise:
missing: list[str] = []
if not _ENTERPRISE_REPO:
missing.append("ENTERPRISE_REPO")
@@ -1606,6 +1748,15 @@ def release(version: str, dry_run: bool, no_edit: bool, skip_enterprise: bool) -
cwd = Path.cwd()
lib_dir = cwd / "lib"
is_prerelease = _is_prerelease(version)
if skip_to_enterprise:
_release_enterprise(version, is_prerelease, dry_run)
console.print(
f"\n[green]✓[/green] Enterprise release [bold]{version}[/bold] complete!"
)
return
if not dry_run:
console.print("Checking git status...")
check_git_clean()
@@ -1699,7 +1850,8 @@ def release(version: str, dry_run: bool, no_edit: bool, skip_enterprise: bool) -
if not dry_run:
_create_tag_and_release(tag_name, release_notes, is_prerelease)
_trigger_pypi_publish(tag_name, wait=not skip_enterprise)
_trigger_pypi_publish(tag_name, wait=True)
_update_deployment_test_repo(version, is_prerelease)
if not skip_enterprise:
_release_enterprise(version, is_prerelease, dry_run)