Merge branch 'main' into lg-preserve-null-types-tools

Greyson LaLonde
2026-02-25 16:51:05 -05:00
committed by GitHub
12 changed files with 859 additions and 87 deletions


@@ -50,6 +50,7 @@ from crewai.utilities.agent_utils import (
handle_unknown_error,
has_reached_max_iterations,
is_context_length_exceeded,
parse_tool_call_args,
process_llm_response,
track_delegation_if_needed,
)
@@ -894,13 +895,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
ToolUsageStartedEvent,
)
if isinstance(func_args, str):
try:
args_dict = json.loads(func_args)
except json.JSONDecodeError:
args_dict = {}
else:
args_dict = func_args
args_dict, parse_error = parse_tool_call_args(func_args, func_name, call_id, original_tool)
if parse_error is not None:
return parse_error
if original_tool is None:
for tool in self.original_tools or []:
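The contract change is easiest to see in isolation. A minimal standalone sketch of the before/after behavior for malformed JSON arguments (the tool and call names here are illustrative):

import json

from crewai.utilities.agent_utils import parse_tool_call_args

malformed = '{"code": "print("hello")"}'  # inner quotes break the JSON

# Old behavior: the executor swallowed the JSONDecodeError and ran the
# tool with empty arguments.
try:
    args_dict = json.loads(malformed)
except json.JSONDecodeError:
    args_dict = {}
assert args_dict == {}

# New behavior: the parse failure becomes an error result that is returned
# to the LLM instead of silently dropping the arguments.
args_dict, parse_error = parse_tool_call_args(malformed, "execute_code", "call_1")
assert args_dict is None
assert "Failed to parse tool arguments as JSON" in parse_error["result"]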


@@ -66,6 +66,7 @@ from crewai.utilities.agent_utils import (
has_reached_max_iterations,
is_context_length_exceeded,
is_inside_event_loop,
parse_tool_call_args,
process_llm_response,
track_delegation_if_needed,
)
@@ -848,13 +849,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
call_id, func_name, func_args = info
# Parse arguments
if isinstance(func_args, str):
try:
args_dict = json.loads(func_args)
except json.JSONDecodeError:
args_dict = {}
else:
args_dict = func_args
args_dict, parse_error = parse_tool_call_args(func_args, func_name, call_id)
if parse_error is not None:
return parse_error
# Get agent_key for event tracking
agent_key = getattr(self.agent, "key", "unknown") if self.agent else "unknown"


@@ -18,6 +18,7 @@ from pydantic import (
BaseModel as PydanticBaseModel,
ConfigDict,
Field,
ValidationError,
create_model,
field_validator,
)
@@ -150,14 +151,37 @@ class BaseTool(BaseModel, ABC):
super().model_post_init(__context)
def _validate_kwargs(self, kwargs: dict[str, Any]) -> dict[str, Any]:
"""Validate keyword arguments against args_schema if present.
Args:
kwargs: The keyword arguments to validate.
Returns:
Validated (and possibly coerced) keyword arguments.
Raises:
ValueError: If validation against args_schema fails.
"""
if kwargs and self.args_schema is not None and self.args_schema.model_fields:
try:
validated = self.args_schema.model_validate(kwargs)
return validated.model_dump()
except Exception as e:
raise ValueError(
f"Tool '{self.name}' arguments validation failed: {e}"
) from e
return kwargs
def run(
self,
*args: Any,
**kwargs: Any,
) -> Any:
kwargs = self._validate_kwargs(kwargs)
result = self._run(*args, **kwargs)
# If _run returned a coroutine (i.e. _run is async), run it to completion
if asyncio.iscoroutine(result):
result = asyncio.run(result)
@@ -179,6 +203,7 @@ class BaseTool(BaseModel, ABC):
Returns:
The result of the tool execution.
"""
kwargs = self._validate_kwargs(kwargs)
result = await self._arun(*args, **kwargs)
self.current_usage_count += 1
return result
@@ -331,6 +356,8 @@ class Tool(BaseTool, Generic[P, R]):
Returns:
The result of the tool execution.
"""
kwargs = self._validate_kwargs(kwargs)
result = self.func(*args, **kwargs)
if asyncio.iscoroutine(result):
@@ -361,6 +388,7 @@ class Tool(BaseTool, Generic[P, R]):
Returns:
The result of the tool execution.
"""
kwargs = self._validate_kwargs(kwargs)
result = await self._arun(*args, **kwargs)
self.current_usage_count += 1
return result
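Because validation round-trips through args_schema.model_validate(...).model_dump(), kwargs are coerced as well as checked, and extra keys are dropped. A hedged sketch under those assumptions (the tool and schema names are illustrative, and the BaseTool import path is assumed from the test module below):

from pydantic import BaseModel, Field

from crewai.tools.base_tool import BaseTool  # assumed import path

class SearchInput(BaseModel):
    query: str = Field(description="Search query")
    limit: int = Field(default=5, description="Max results")

class SearchTool(BaseTool):
    name: str = "search"
    description: str = "Search things"
    args_schema: type[BaseModel] = SearchInput

    def _run(self, query: str, limit: int = 5) -> str:
        return f"{query}:{limit}"

t = SearchTool()

# Pydantic coerces "10" to the int 10 before _run is called.
assert t.run(query="cats", limit="10") == "cats:10"

# A missing required field raises before _run runs (and before
# current_usage_count is incremented):
#   t.run(limit=3)  -> ValueError: Tool 'search' arguments validation failed: ...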


@@ -1148,6 +1148,36 @@ def extract_tool_call_info(
return None
def parse_tool_call_args(
func_args: dict[str, Any] | str,
func_name: str,
call_id: str,
original_tool: Any = None,
) -> tuple[dict[str, Any], None] | tuple[None, dict[str, Any]]:
"""Parse tool call arguments from a JSON string or dict.
Returns:
``(args_dict, None)`` on success, or ``(None, error_result)`` on
JSON parse failure where ``error_result`` is a ready-to-return dict
with the same shape as ``_execute_single_native_tool_call`` return values.
"""
if isinstance(func_args, str):
try:
return json.loads(func_args), None
except json.JSONDecodeError as e:
return None, {
"call_id": call_id,
"func_name": func_name,
"result": (
f"Error: Failed to parse tool arguments as JSON: {e}. "
f"Please provide valid JSON arguments for the '{func_name}' tool."
),
"from_cache": False,
"original_tool": original_tool,
}
return func_args, None
def _setup_before_llm_call_hooks(
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None,
printer: Printer,


@@ -11,7 +11,7 @@ import os
import threading
import time
from collections import Counter
from unittest.mock import patch
from unittest.mock import Mock, patch
import pytest
from pydantic import BaseModel, Field
@@ -1129,3 +1129,150 @@ class TestMaxUsageCountWithNativeToolCalling:
# Verify the requested calls occurred while keeping usage bounded.
assert tool.current_usage_count >= 2
assert tool.current_usage_count <= tool.max_usage_count
# =============================================================================
# JSON Parse Error Handling Tests
# =============================================================================
class TestNativeToolCallingJsonParseError:
"""Tests that malformed JSON tool arguments produce clear errors
instead of silently dropping all arguments."""
def _make_executor(self, tools: list[BaseTool]) -> "CrewAgentExecutor":
"""Create a minimal CrewAgentExecutor with mocked dependencies."""
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.tools.base_tool import to_langchain
structured_tools = to_langchain(tools)
mock_agent = Mock()
mock_agent.key = "test_agent"
mock_agent.role = "tester"
mock_agent.verbose = False
mock_agent.fingerprint = None
mock_agent.tools_results = []
mock_task = Mock()
mock_task.name = "test"
mock_task.description = "test"
mock_task.id = "test-id"
executor = object.__new__(CrewAgentExecutor)
executor.agent = mock_agent
executor.task = mock_task
executor.crew = Mock()
executor.tools = structured_tools
executor.original_tools = tools
executor.tools_handler = None
executor._printer = Mock()
executor.messages = []
return executor
def test_malformed_json_returns_parse_error(self) -> None:
"""Malformed JSON args must return a descriptive error, not silently become {}."""
class CodeTool(BaseTool):
name: str = "execute_code"
description: str = "Run code"
def _run(self, code: str) -> str:
return f"ran: {code}"
tool = CodeTool()
executor = self._make_executor([tool])
from crewai.utilities.agent_utils import convert_tools_to_openai_schema
_, available_functions = convert_tools_to_openai_schema([tool])
malformed_json = '{"code": "print("hello")"}'
result = executor._execute_single_native_tool_call(
call_id="call_123",
func_name="execute_code",
func_args=malformed_json,
available_functions=available_functions,
)
assert "Failed to parse tool arguments as JSON" in result["result"]
assert tool.current_usage_count == 0
def test_valid_json_still_executes_normally(self) -> None:
"""Valid JSON args should execute the tool as before."""
class CodeTool(BaseTool):
name: str = "execute_code"
description: str = "Run code"
def _run(self, code: str) -> str:
return f"ran: {code}"
tool = CodeTool()
executor = self._make_executor([tool])
from crewai.utilities.agent_utils import convert_tools_to_openai_schema
_, available_functions = convert_tools_to_openai_schema([tool])
valid_json = '{"code": "print(1)"}'
result = executor._execute_single_native_tool_call(
call_id="call_456",
func_name="execute_code",
func_args=valid_json,
available_functions=available_functions,
)
assert result["result"] == "ran: print(1)"
def test_dict_args_bypass_json_parsing(self) -> None:
"""When func_args is already a dict, no JSON parsing occurs."""
class CodeTool(BaseTool):
name: str = "execute_code"
description: str = "Run code"
def _run(self, code: str) -> str:
return f"ran: {code}"
tool = CodeTool()
executor = self._make_executor([tool])
from crewai.utilities.agent_utils import convert_tools_to_openai_schema
_, available_functions = convert_tools_to_openai_schema([tool])
result = executor._execute_single_native_tool_call(
call_id="call_789",
func_name="execute_code",
func_args={"code": "x = 42"},
available_functions=available_functions,
)
assert result["result"] == "ran: x = 42"
def test_schema_validation_catches_missing_args_on_native_path(self) -> None:
"""The native function calling path should now enforce args_schema,
catching missing required fields before _run is called."""
class StrictTool(BaseTool):
name: str = "strict_tool"
description: str = "A tool with required args"
def _run(self, code: str, language: str) -> str:
return f"{language}: {code}"
tool = StrictTool()
executor = self._make_executor([tool])
from crewai.utilities.agent_utils import convert_tools_to_openai_schema
_, available_functions = convert_tools_to_openai_schema([tool])
result = executor._execute_single_native_tool_call(
call_id="call_schema",
func_name="strict_tool",
func_args={"code": "print(1)"},
available_functions=available_functions,
)
assert "Error" in result["result"]
assert "validation failed" in result["result"].lower() or "missing" in result["result"].lower()


@@ -3,6 +3,8 @@ from typing import Callable
from unittest.mock import patch
import pytest
from pydantic import BaseModel, Field
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task
@@ -230,3 +232,204 @@ def test_max_usage_count_is_respected():
crew.kickoff()
assert tool.max_usage_count == 5
assert tool.current_usage_count == 5
# =============================================================================
# Schema Validation in run() Tests
# =============================================================================
class CodeExecutorInput(BaseModel):
code: str = Field(description="The code to execute")
language: str = Field(default="python", description="Programming language")
class CodeExecutorTool(BaseTool):
name: str = "code_executor"
description: str = "Execute code snippets"
args_schema: type[BaseModel] = CodeExecutorInput
def _run(self, code: str, language: str = "python") -> str:
return f"Executed {language}: {code}"
class TestBaseToolRunValidation:
"""Tests for args_schema validation in BaseTool.run()."""
def test_run_with_valid_kwargs_passes_validation(self) -> None:
"""Valid keyword arguments should pass schema validation and execute."""
t = CodeExecutorTool()
result = t.run(code="print('hello')")
assert result == "Executed python: print('hello')"
def test_run_with_all_kwargs_passes_validation(self) -> None:
"""All keyword arguments including optional ones should pass."""
t = CodeExecutorTool()
result = t.run(code="console.log('hi')", language="javascript")
assert result == "Executed javascript: console.log('hi')"
def test_run_with_missing_required_kwarg_raises(self) -> None:
"""Missing required kwargs should raise ValueError from schema validation."""
t = CodeExecutorTool()
with pytest.raises(ValueError, match="validation failed"):
t.run(language="python")
def test_run_with_wrong_field_name_raises(self) -> None:
"""Kwargs not matching any schema field should trigger validation error
for missing required fields."""
t = CodeExecutorTool()
with pytest.raises(ValueError, match="validation failed"):
t.run(wrong_arg="value")
def test_run_with_positional_args_skips_validation(self) -> None:
"""Positional-arg calls should bypass schema validation (backwards compat)."""
class SimpleTool(BaseTool):
name: str = "simple"
description: str = "A simple tool"
def _run(self, question: str) -> str:
return question
t = SimpleTool()
result = t.run("What is life?")
assert result == "What is life?"
def test_run_strips_extra_kwargs_from_llm(self) -> None:
"""Extra kwargs not in the schema should be silently stripped,
preventing unexpected-keyword crashes in _run."""
t = CodeExecutorTool()
result = t.run(code="1+1", extra_hallucinated_field="junk")
assert result == "Executed python: 1+1"
def test_run_increments_usage_after_validation(self) -> None:
"""Usage count should still increment after validated execution."""
t = CodeExecutorTool()
assert t.current_usage_count == 0
t.run(code="x = 1")
assert t.current_usage_count == 1
def test_run_does_not_increment_usage_on_validation_error(self) -> None:
"""Usage count should NOT increment when validation fails."""
t = CodeExecutorTool()
assert t.current_usage_count == 0
with pytest.raises(ValueError):
t.run(wrong="bad")
assert t.current_usage_count == 0
class TestToolDecoratorRunValidation:
"""Tests for args_schema validation in Tool.run() (decorator-based tools)."""
def test_decorator_tool_run_validates_kwargs(self) -> None:
"""Decorator-created tools should also validate kwargs against schema."""
@tool("execute_code")
def execute_code(code: str, language: str = "python") -> str:
"""Execute a code snippet."""
return f"Executed {language}: {code}"
result = execute_code.run(code="x = 1")
assert result == "Executed python: x = 1"
def test_decorator_tool_run_rejects_missing_required(self) -> None:
"""Decorator tools should reject missing required args via validation."""
@tool("execute_code")
def execute_code(code: str) -> str:
"""Execute a code snippet."""
return f"Executed: {code}"
with pytest.raises(ValueError, match="validation failed"):
execute_code.run(wrong_arg="value")
def test_decorator_tool_positional_args_still_work(self) -> None:
"""Positional args to decorator tools should bypass validation."""
@tool("greet")
def greet(name: str) -> str:
"""Greet someone."""
return f"Hello, {name}!"
result = greet.run("World")
assert result == "Hello, World!"
# =============================================================================
# Async arun() Schema Validation Tests
# =============================================================================
class AsyncCodeExecutorTool(BaseTool):
name: str = "async_code_executor"
description: str = "Execute code snippets asynchronously"
args_schema: type[BaseModel] = CodeExecutorInput
async def _arun(self, code: str, language: str = "python") -> str:
return f"Async executed {language}: {code}"
def _run(self, code: str, language: str = "python") -> str:
return f"Executed {language}: {code}"
class TestBaseToolArunValidation:
"""Tests for args_schema validation in BaseTool.arun()."""
@pytest.mark.asyncio
async def test_arun_with_valid_kwargs_passes_validation(self) -> None:
"""Valid keyword arguments should pass schema validation in arun."""
t = AsyncCodeExecutorTool()
result = await t.arun(code="print('hello')")
assert result == "Async executed python: print('hello')"
@pytest.mark.asyncio
async def test_arun_with_missing_required_kwarg_raises(self) -> None:
"""Missing required kwargs should raise ValueError in arun."""
t = AsyncCodeExecutorTool()
with pytest.raises(ValueError, match="validation failed"):
await t.arun(language="python")
@pytest.mark.asyncio
async def test_arun_with_wrong_field_name_raises(self) -> None:
"""Kwargs not matching schema fields should trigger validation error in arun."""
t = AsyncCodeExecutorTool()
with pytest.raises(ValueError, match="validation failed"):
await t.arun(wrong_arg="value")
@pytest.mark.asyncio
async def test_arun_strips_extra_kwargs(self) -> None:
"""Extra kwargs not in the schema should be stripped in arun."""
t = AsyncCodeExecutorTool()
result = await t.arun(code="1+1", extra_field="junk")
assert result == "Async executed python: 1+1"
@pytest.mark.asyncio
async def test_arun_does_not_increment_usage_on_validation_error(self) -> None:
"""Usage count should NOT increment when arun validation fails."""
t = AsyncCodeExecutorTool()
assert t.current_usage_count == 0
with pytest.raises(ValueError):
await t.arun(wrong="bad")
assert t.current_usage_count == 0
class TestToolDecoratorArunValidation:
"""Tests for args_schema validation in Tool.arun() (decorator-based async tools)."""
@pytest.mark.asyncio
async def test_async_decorator_tool_arun_validates_kwargs(self) -> None:
"""Async decorator tools should validate kwargs in arun."""
@tool("async_execute")
async def async_execute(code: str, language: str = "python") -> str:
"""Execute code asynchronously."""
return f"Async {language}: {code}"
result = await async_execute.arun(code="x = 1")
assert result == "Async python: x = 1"
@pytest.mark.asyncio
async def test_async_decorator_tool_arun_rejects_missing_required(self) -> None:
"""Async decorator tools should reject missing required args in arun."""
@tool("async_execute")
async def async_execute(code: str) -> str:
"""Execute code asynchronously."""
return f"Async: {code}"
with pytest.raises(ValueError, match="validation failed"):
await async_execute.arun(wrong_arg="value")


@@ -17,6 +17,7 @@ from crewai.utilities.agent_utils import (
_format_messages_for_summary,
_split_messages_into_chunks,
convert_tools_to_openai_schema,
parse_tool_call_args,
summarize_messages,
)
@@ -995,3 +996,56 @@ class TestParallelSummarizationVCR:
assert summary_msg["role"] == "user"
assert "files" in summary_msg
assert "report.pdf" in summary_msg["files"]
class TestParseToolCallArgs:
"""Unit tests for parse_tool_call_args."""
def test_valid_json_string_returns_dict(self) -> None:
args_dict, error = parse_tool_call_args('{"code": "print(1)"}', "run_code", "call_1")
assert error is None
assert args_dict == {"code": "print(1)"}
def test_malformed_json_returns_error_dict(self) -> None:
args_dict, error = parse_tool_call_args('{"code": "print("hi")"}', "run_code", "call_1")
assert args_dict is None
assert error is not None
assert error["call_id"] == "call_1"
assert error["func_name"] == "run_code"
assert error["from_cache"] is False
assert "Failed to parse tool arguments as JSON" in error["result"]
assert "run_code" in error["result"]
def test_malformed_json_preserves_original_tool(self) -> None:
mock_tool = object()
_, error = parse_tool_call_args("{bad}", "my_tool", "call_2", original_tool=mock_tool)
assert error is not None
assert error["original_tool"] is mock_tool
def test_malformed_json_original_tool_defaults_to_none(self) -> None:
_, error = parse_tool_call_args("{bad}", "my_tool", "call_3")
assert error is not None
assert error["original_tool"] is None
def test_dict_input_returned_directly(self) -> None:
func_args = {"code": "x = 42"}
args_dict, error = parse_tool_call_args(func_args, "run_code", "call_4")
assert error is None
assert args_dict == {"code": "x = 42"}
def test_empty_dict_input_returned_directly(self) -> None:
args_dict, error = parse_tool_call_args({}, "run_code", "call_5")
assert error is None
assert args_dict == {}
def test_valid_json_with_nested_values(self) -> None:
args_dict, error = parse_tool_call_args(
'{"query": "hello", "options": {"limit": 10}}', "search", "call_6"
)
assert error is None
assert args_dict == {"query": "hello", "options": {"limit": 10}}
def test_error_result_has_correct_keys(self) -> None:
_, error = parse_tool_call_args("{bad json}", "tool", "call_7")
assert error is not None
assert set(error.keys()) == {"call_id", "func_name", "result", "from_cache", "original_tool"}


@@ -14,7 +14,7 @@ from rich.markdown import Markdown
from rich.panel import Panel
from rich.prompt import Confirm
from crewai_devtools.prompts import RELEASE_NOTES_PROMPT
from crewai_devtools.prompts import RELEASE_NOTES_PROMPT, TRANSLATE_RELEASE_NOTES_PROMPT
load_dotenv()
@@ -191,6 +191,248 @@ def update_pyproject_dependencies(file_path: Path, new_version: str) -> bool:
return False
def add_docs_version(docs_json_path: Path, version: str) -> bool:
"""Add a new version to the Mintlify docs.json versioning config.
Copies the current default version's tabs into a new version entry,
sets the new version as default, and marks the previous default as
non-default. Operates on all languages.
Args:
docs_json_path: Path to docs/docs.json.
version: Version string (e.g., "1.10.0").
Returns:
True if docs.json was updated, False otherwise.
"""
import json
if not docs_json_path.exists():
return False
data = json.loads(docs_json_path.read_text())
version_label = f"v{version}"
updated = False
for lang in data.get("navigation", {}).get("languages", []):
versions = lang.get("versions", [])
if not versions:
continue
# Skip if this version already exists for this language
if any(v.get("version") == version_label for v in versions):
continue
# Find the current default and copy its tabs
default_version = next(
(v for v in versions if v.get("default")),
versions[0],
)
new_version = {
"version": version_label,
"default": True,
"tabs": default_version.get("tabs", []),
}
# Remove default flag from old default
default_version.pop("default", None)
# Insert new version at the beginning
versions.insert(0, new_version)
updated = True
if not updated:
return False
docs_json_path.write_text(json.dumps(data, indent=2, ensure_ascii=False) + "\n")
return True
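The function assumes the Mintlify layout below. A minimal round-trip sketch (hypothetical docs.json content; real tab entries are much larger, and add_docs_version is the function defined above):

import json
from pathlib import Path

doc = {
    "navigation": {
        "languages": [
            {
                "language": "en",
                "versions": [
                    {"version": "v1.9.0", "default": True, "tabs": [{"tab": "Docs"}]}
                ],
            }
        ]
    }
}
path = Path("docs.json")
path.write_text(json.dumps(doc))

assert add_docs_version(path, "1.10.0")
versions = json.loads(path.read_text())["navigation"]["languages"][0]["versions"]
# The new version is inserted first, becomes the default, and inherits the
# old default's tabs; the previous default loses its flag.
assert versions[0] == {"version": "v1.10.0", "default": True, "tabs": [{"tab": "Docs"}]}
assert "default" not in versions[1]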
_PT_BR_MONTHS = {
1: "jan",
2: "fev",
3: "mar",
4: "abr",
5: "mai",
6: "jun",
7: "jul",
8: "ago",
9: "set",
10: "out",
11: "nov",
12: "dez",
}
_CHANGELOG_LOCALES: dict[str, dict[str, str]] = {
"en": {
"link_text": "View release on GitHub",
"language_name": "English",
},
"pt-BR": {
"link_text": "Ver release no GitHub",
"language_name": "Brazilian Portuguese",
},
"ko": {
"link_text": "GitHub 릴리스 보기",
"language_name": "Korean",
},
}
def translate_release_notes(
release_notes: str,
lang: str,
client: OpenAI,
) -> str:
"""Translate release notes into the target language using OpenAI.
Args:
release_notes: English release notes markdown.
lang: Language code (e.g., "pt-BR", "ko").
client: OpenAI client instance.
Returns:
Translated release notes, or original on failure.
"""
locale_cfg = _CHANGELOG_LOCALES.get(lang)
if not locale_cfg:
return release_notes
language_name = locale_cfg["language_name"]
prompt = TRANSLATE_RELEASE_NOTES_PROMPT.substitute(
language=language_name,
release_notes=release_notes,
)
try:
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{
"role": "system",
"content": f"You are a professional translator. Translate technical documentation into {language_name}.",
},
{"role": "user", "content": prompt},
],
temperature=0.3,
)
return response.choices[0].message.content or release_notes
except Exception as e:
console.print(
f"[yellow]Warning:[/yellow] Could not translate to {language_name}: {e}"
)
return release_notes
def _format_changelog_date(lang: str) -> str:
"""Format today's date for a changelog entry in the given language."""
from datetime import datetime
now = datetime.now()
if lang == "ko":
return f"{now.year}{now.month}{now.day}"
if lang == "pt-BR":
return f"{now.day:02d} {_PT_BR_MONTHS[now.month]} {now.year}"
return now.strftime("%b %d, %Y")
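Illustrative outputs for a run on 2026-02-25 (the commit date above), assuming the Korean 년/월/일 suffixes:

# _format_changelog_date("en")    -> "Feb 25, 2026"
# _format_changelog_date("pt-BR") -> "25 fev 2026"
# _format_changelog_date("ko")    -> "2026년 2월 25일"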
def update_changelog(
changelog_path: Path,
version: str,
release_notes: str,
lang: str = "en",
) -> bool:
"""Prepend a new release entry to a docs changelog file.
Args:
changelog_path: Path to the changelog.mdx file.
version: Version string (e.g., "1.9.3").
release_notes: Markdown release notes content.
lang: Language code for localized date/link text.
Returns:
True if changelog was updated, False otherwise.
"""
if not changelog_path.exists():
return False
locale_cfg = _CHANGELOG_LOCALES.get(lang, _CHANGELOG_LOCALES["en"])
date_label = _format_changelog_date(lang)
link_text = locale_cfg["link_text"]
# Indent each non-empty line with 2 spaces to match <Update> block format
indented_lines = []
for line in release_notes.splitlines():
if line.strip():
indented_lines.append(f" {line}")
else:
indented_lines.append("")
indented_notes = "\n".join(indented_lines)
entry = (
f'<Update label="{date_label}">\n'
f" ## v{version}\n"
f"\n"
f" [{link_text}]"
f"(https://github.com/crewAIInc/crewAI/releases/tag/{version})\n"
f"\n"
f"{indented_notes}\n"
f"\n"
f"</Update>"
)
content = changelog_path.read_text()
# Insert after the frontmatter closing ---
parts = content.split("---", 2)
if len(parts) >= 3:
new_content = (
parts[0]
+ "---"
+ parts[1]
+ "---\n"
+ entry
+ "\n\n"
+ parts[2].lstrip("\n")
)
else:
new_content = entry + "\n\n" + content
changelog_path.write_text(new_content)
return True
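For example, a 1.9.3 release tagged on that date would be prepended as an entry shaped like this (a sketch; the notes body is placeholder text):

<Update label="Feb 25, 2026">
  ## v1.9.3

  [View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.9.3)

  - Example release note line
</Update>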
def update_template_dependencies(templates_dir: Path, new_version: str) -> list[Path]:
"""Update crewai dependency versions in CLI template pyproject.toml files.
Matches both pinned (==) and minimum (>=) specifiers, with or without
the [tools] extra, and pins each match to the new version with ==.
Args:
templates_dir: Path to the CLI templates directory.
new_version: New version string.
Returns:
List of paths that were updated.
"""
import re
updated = []
for pyproject in templates_dir.rglob("pyproject.toml"):
content = pyproject.read_text()
new_content = re.sub(
r'"crewai(\[tools\])?(==|>=)[^"]*"',
lambda m: f'"crewai{m.group(1) or ""}=={new_version}"',
content,
)
if new_content != content:
pyproject.write_text(new_content)
updated.append(pyproject)
return updated
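A quick standalone check of the substitution against both specifier styles (the version string is illustrative):

import re

pattern = r'"crewai(\[tools\])?(==|>=)[^"]*"'

def repl(m: re.Match) -> str:
    return f'"crewai{m.group(1) or ""}==1.10.0"'

# Minimum (>=) specifiers are pinned to ==, and extras are preserved.
assert re.sub(pattern, repl, 'dependencies = ["crewai>=1.9.0"]') == 'dependencies = ["crewai==1.10.0"]'
assert re.sub(pattern, repl, '"crewai[tools]==1.9.0"') == '"crewai[tools]==1.10.0"'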
def find_version_files(base_path: Path) -> list[Path]:
"""Find all __init__.py files that contain __version__.
@@ -394,6 +636,22 @@ def bump(version: str, dry_run: bool, no_push: bool, no_commit: bool) -> None:
"[yellow]Warning:[/yellow] No __version__ attributes found to update"
)
# Update CLI template pyproject.toml files
templates_dir = lib_dir / "crewai" / "src" / "crewai" / "cli" / "templates"
if templates_dir.exists():
if dry_run:
for tpl in templates_dir.rglob("pyproject.toml"):
console.print(
f"[dim][DRY RUN][/dim] Would update template: {tpl.relative_to(cwd)}"
)
else:
tpl_updated = update_template_dependencies(templates_dir, version)
for tpl in tpl_updated:
console.print(
f"[green]✓[/green] Updated template: {tpl.relative_to(cwd)}"
)
updated_files.append(tpl)
if not dry_run:
console.print("\nSyncing workspace...")
run_command(["uv", "sync"])
@@ -575,9 +833,9 @@ def tag(dry_run: bool, no_edit: bool) -> None:
github_contributors = get_github_contributors(commit_range)
if commits.strip():
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
if commits.strip():
contributors_section = ""
if github_contributors:
contributors_section = f"\n\n## Contributors\n\n{', '.join([f'@{u}' for u in github_contributors])}"
@@ -588,7 +846,7 @@ def tag(dry_run: bool, no_edit: bool) -> None:
contributors_section=contributors_section,
)
response = client.chat.completions.create(
response = openai_client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{
@@ -643,6 +901,77 @@ def tag(dry_run: bool, no_edit: bool) -> None:
"\n[green]✓[/green] Using generated release notes without editing"
)
is_prerelease = any(
indicator in version.lower()
for indicator in ["a", "b", "rc", "alpha", "beta", "dev"]
)
# Update docs: changelogs + version switcher
docs_json_path = cwd / "docs" / "docs.json"
changelog_langs = ["en", "pt-BR", "ko"]
if not dry_run:
docs_files_staged = []
for lang in changelog_langs:
cl_path = cwd / "docs" / lang / "changelog.mdx"
if lang == "en":
notes_for_lang = release_notes
else:
console.print(f"[dim]Translating release notes to {lang}...[/dim]")
notes_for_lang = translate_release_notes(
release_notes, lang, openai_client
)
if update_changelog(cl_path, version, notes_for_lang, lang=lang):
console.print(
f"[green]✓[/green] Updated {cl_path.relative_to(cwd)}"
)
docs_files_staged.append(str(cl_path))
else:
console.print(
f"[yellow]Warning:[/yellow] Changelog not found at {cl_path.relative_to(cwd)}"
)
if not is_prerelease:
if add_docs_version(docs_json_path, version):
console.print(
f"[green]✓[/green] Added v{version} to docs version switcher"
)
docs_files_staged.append(str(docs_json_path))
else:
console.print(
f"[yellow]Warning:[/yellow] docs.json not found at {docs_json_path.relative_to(cwd)}"
)
if docs_files_staged:
for f in docs_files_staged:
run_command(["git", "add", f])
run_command(
[
"git",
"commit",
"-m",
f"docs: update changelog and version for v{version}",
]
)
console.print("[green]✓[/green] Committed docs updates")
run_command(["git", "push"])
console.print("[green]✓[/green] Pushed docs updates")
else:
for lang in changelog_langs:
cl_path = cwd / "docs" / lang / "changelog.mdx"
translated = " (translated)" if lang != "en" else ""
console.print(
f"[dim][DRY RUN][/dim] Would update {cl_path.relative_to(cwd)}{translated}"
)
if not is_prerelease:
console.print(
f"[dim][DRY RUN][/dim] Would add v{version} to docs version switcher"
)
else:
console.print(
"[dim][DRY RUN][/dim] Skipping docs version (pre-release)"
)
if not dry_run:
with console.status(f"[cyan]Creating tag {tag_name}..."):
try:
@@ -660,11 +989,6 @@ def tag(dry_run: bool, no_edit: bool) -> None:
sys.exit(1)
console.print(f"[green]✓[/green] Pushed tag {tag_name}")
is_prerelease = any(
indicator in version.lower()
for indicator in ["a", "b", "rc", "alpha", "beta", "dev"]
)
with console.status("[cyan]Creating GitHub Release..."):
try:
gh_cmd = [


@@ -43,3 +43,18 @@ Instructions:
Keep it professional and clear."""
)
TRANSLATE_RELEASE_NOTES_PROMPT = Template(
"""Translate the following release notes into $language.
$release_notes
Instructions:
- Translate all section headers and descriptions naturally
- Keep markdown formatting (##, ###, -, etc.) exactly as-is
- Keep all proper nouns, code identifiers, class names, and technical terms unchanged
(e.g. "CrewAI", "LiteAgent", "ChromaDB", "MCP", "@username")
- Keep the ## Contributors section and GitHub usernames unchanged
- Do not add or remove any content, only translate"""
)