From 86ce54fc82d92e93b3d6b7d88a4ead74c6374fe5 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Tue, 7 Apr 2026 03:22:30 +0800 Subject: [PATCH 01/21] feat: runtime state checkpointing, event system, and executor refactor - Pass RuntimeState through the event bus and enable entity auto-registration - Introduce checkpointing API: - .checkpoint(), .from_checkpoint(), and async checkpoint support - Provider-based storage with BaseProvider and JsonProvider - Mid-task resume and kickoff() integration - Add EventRecord tracking and full event serialization with subtype preservation - Enable checkpoint fidelity via llm_type and executor_type discriminators - Refactor executor architecture: - Convert executors, tools, prompts, and TokenProcess to BaseModel - Introduce proper base classes with typed fields (CrewAgentExecutorMixin, BaseAgentExecutor) - Add generic from_checkpoint with full LLM serialization - Support executor back-references and resume-safe initialization - Refactor runtime state system: - Move RuntimeState into state/ module with async checkpoint support - Add entity serialization improvements and JSON-safe round-tripping - Implement event scope tracking and replay for accurate resume behavior - Improve tool and schema handling: - Make BaseTool fully serializable with JSON round-trip support - Serialize args_schema via JSON schema and dynamically reconstruct models - Add automatic subclass restoration via tool_type discriminator - Enhance Flow checkpointing: - Support restoring execution state and subclass-aware deserialization - Performance improvements: - Cache handler signature inspection - Optimize event emission and metadata preparation - General cleanup: - Remove dead checkpoint payload structures - Simplify entity registration and serialization logic --- .../tests/test_generate_tool_specs.py | 1 + lib/crewai/pyproject.toml | 1 + lib/crewai/src/crewai/__init__.py | 41 +- lib/crewai/src/crewai/agent/core.py | 18 +- 
.../crewai/agents/agent_builder/base_agent.py | 94 +++- ...ecutor_mixin.py => base_agent_executor.py} | 40 +- .../utilities/base_token_process.py | 55 +-- .../src/crewai/agents/crew_agent_executor.py | 220 ++++----- .../src/crewai/agents/planner_observer.py | 4 +- lib/crewai/src/crewai/agents/step_executor.py | 4 +- lib/crewai/src/crewai/context.py | 2 +- lib/crewai/src/crewai/crew.py | 130 +++++- lib/crewai/src/crewai/crews/utils.py | 53 ++- lib/crewai/src/crewai/events/event_bus.py | 162 +++++-- lib/crewai/src/crewai/events/event_context.py | 5 + .../src/crewai/events/types/a2a_events.py | 66 +-- .../src/crewai/events/types/agent_events.py | 20 +- .../src/crewai/events/types/crew_events.py | 22 +- .../crewai/events/types/event_bus_types.py | 13 +- .../src/crewai/events/types/flow_events.py | 28 +- .../crewai/events/types/knowledge_events.py | 16 +- .../src/crewai/events/types/llm_events.py | 12 +- .../events/types/llm_guardrail_events.py | 8 +- .../src/crewai/events/types/logging_events.py | 6 +- .../src/crewai/events/types/mcp_events.py | 16 +- .../src/crewai/events/types/memory_events.py | 20 +- .../crewai/events/types/observation_events.py | 14 +- .../crewai/events/types/reasoning_events.py | 8 +- .../src/crewai/events/types/skill_events.py | 12 +- .../src/crewai/events/types/task_events.py | 22 +- .../crewai/events/types/tool_usage_events.py | 14 +- .../src/crewai/events/utils/handlers.py | 24 +- .../src/crewai/experimental/agent_executor.py | 32 +- lib/crewai/src/crewai/flow/flow.py | 50 +++ lib/crewai/src/crewai/lite_agent.py | 2 +- lib/crewai/src/crewai/llm.py | 21 +- lib/crewai/src/crewai/llms/base_llm.py | 23 +- .../llms/providers/anthropic/completion.py | 1 + .../crewai/llms/providers/azure/completion.py | 3 +- .../llms/providers/bedrock/completion.py | 3 +- .../llms/providers/gemini/completion.py | 1 + .../llms/providers/openai/completion.py | 38 +- lib/crewai/src/crewai/runtime_state.py | 18 - lib/crewai/src/crewai/state/__init__.py | 0 
lib/crewai/src/crewai/state/event_record.py | 205 +++++++++ .../src/crewai/state/provider/__init__.py | 0 lib/crewai/src/crewai/state/provider/core.py | 81 ++++ .../crewai/state/provider/json_provider.py | 87 ++++ lib/crewai/src/crewai/state/runtime.py | 160 +++++++ lib/crewai/src/crewai/task.py | 10 +- lib/crewai/src/crewai/tools/base_tool.py | 108 ++++- .../src/crewai/tools/structured_tool.py | 95 ++-- .../src/crewai/utilities/agent_utils.py | 12 +- lib/crewai/src/crewai/utilities/prompts.py | 20 +- lib/crewai/src/crewai/utilities/streaming.py | 4 +- .../utilities/token_counter_callback.py | 48 +- .../tests/agents/test_async_agent_executor.py | 67 ++- .../tests/agents/test_native_tool_calling.py | 12 +- .../tests/memory/test_memory_root_scope.py | 72 +-- .../tests/memory/test_unified_memory.py | 36 +- .../test_google_vertex_memory_integration.py | 4 +- lib/crewai/tests/test_crew.py | 1 + lib/crewai/tests/test_event_record.py | 423 ++++++++++++++++++ uv.lock | 21 +- 64 files changed, 2088 insertions(+), 721 deletions(-) rename lib/crewai/src/crewai/agents/agent_builder/{base_agent_executor_mixin.py => base_agent_executor.py} (70%) delete mode 100644 lib/crewai/src/crewai/runtime_state.py create mode 100644 lib/crewai/src/crewai/state/__init__.py create mode 100644 lib/crewai/src/crewai/state/event_record.py create mode 100644 lib/crewai/src/crewai/state/provider/__init__.py create mode 100644 lib/crewai/src/crewai/state/provider/core.py create mode 100644 lib/crewai/src/crewai/state/provider/json_provider.py create mode 100644 lib/crewai/src/crewai/state/runtime.py create mode 100644 lib/crewai/tests/test_event_record.py diff --git a/lib/crewai-tools/tests/test_generate_tool_specs.py b/lib/crewai-tools/tests/test_generate_tool_specs.py index 2f56ed1e6..7506c4ee4 100644 --- a/lib/crewai-tools/tests/test_generate_tool_specs.py +++ b/lib/crewai-tools/tests/test_generate_tool_specs.py @@ -97,6 +97,7 @@ def test_extract_init_params_schema(mock_tool_extractor): assert 
init_params_schema.keys() == { "$defs", "properties", + "required", "title", "type", } diff --git a/lib/crewai/pyproject.toml b/lib/crewai/pyproject.toml index 6b6602bf2..a09fb4461 100644 --- a/lib/crewai/pyproject.toml +++ b/lib/crewai/pyproject.toml @@ -43,6 +43,7 @@ dependencies = [ "uv~=0.9.13", "aiosqlite~=0.21.0", "pyyaml~=6.0", + "aiofiles~=24.1.0", "lancedb>=0.29.2,<0.30.1", ] diff --git a/lib/crewai/src/crewai/__init__.py b/lib/crewai/src/crewai/__init__.py index e82b92511..01be9fead 100644 --- a/lib/crewai/src/crewai/__init__.py +++ b/lib/crewai/src/crewai/__init__.py @@ -16,7 +16,6 @@ from crewai.knowledge.knowledge import Knowledge from crewai.llm import LLM from crewai.llms.base_llm import BaseLLM from crewai.process import Process -from crewai.runtime_state import _entity_discriminator from crewai.task import Task from crewai.tasks.llm_guardrail import LLMGuardrail from crewai.tasks.task_output import TaskOutput @@ -99,8 +98,8 @@ def __getattr__(name: str) -> Any: try: from crewai.agents.agent_builder.base_agent import BaseAgent as _BaseAgent - from crewai.agents.agent_builder.base_agent_executor_mixin import ( - CrewAgentExecutorMixin as _CrewAgentExecutorMixin, + from crewai.agents.agent_builder.base_agent_executor import ( + BaseAgentExecutor as _BaseAgentExecutor, ) from crewai.agents.tools_handler import ToolsHandler as _ToolsHandler from crewai.experimental.agent_executor import AgentExecutor as _AgentExecutor @@ -118,10 +117,18 @@ try: "Flow": Flow, "BaseLLM": BaseLLM, "Task": Task, - "CrewAgentExecutorMixin": _CrewAgentExecutorMixin, + "BaseAgentExecutor": _BaseAgentExecutor, "ExecutionContext": ExecutionContext, + "StandardPromptResult": _StandardPromptResult, + "SystemPromptResult": _SystemPromptResult, } + from crewai.tools.base_tool import BaseTool as _BaseTool + from crewai.tools.structured_tool import CrewStructuredTool as _CrewStructuredTool + + _base_namespace["BaseTool"] = _BaseTool + _base_namespace["CrewStructuredTool"] = 
_CrewStructuredTool + try: from crewai.a2a.config import ( A2AClientConfig as _A2AClientConfig, @@ -155,36 +162,49 @@ try: **sys.modules[_BaseAgent.__module__].__dict__, } + import crewai.state.runtime as _runtime_state_mod + for _mod_name in ( _BaseAgent.__module__, Agent.__module__, Crew.__module__, Flow.__module__, Task.__module__, + "crewai.agents.crew_agent_executor", + _runtime_state_mod.__name__, _AgentExecutor.__module__, ): sys.modules[_mod_name].__dict__.update(_resolve_namespace) + from crewai.agents.crew_agent_executor import ( + CrewAgentExecutor as _CrewAgentExecutor, + ) from crewai.tasks.conditional_task import ConditionalTask as _ConditionalTask + _BaseAgentExecutor.model_rebuild(force=True, _types_namespace=_full_namespace) _BaseAgent.model_rebuild(force=True, _types_namespace=_full_namespace) Task.model_rebuild(force=True, _types_namespace=_full_namespace) _ConditionalTask.model_rebuild(force=True, _types_namespace=_full_namespace) + _CrewAgentExecutor.model_rebuild(force=True, _types_namespace=_full_namespace) Crew.model_rebuild(force=True, _types_namespace=_full_namespace) Flow.model_rebuild(force=True, _types_namespace=_full_namespace) _AgentExecutor.model_rebuild(force=True, _types_namespace=_full_namespace) from typing import Annotated - from pydantic import Discriminator, RootModel, Tag + from pydantic import Field + + from crewai.state.runtime import RuntimeState Entity = Annotated[ - Annotated[Flow, Tag("flow")] # type: ignore[type-arg] - | Annotated[Crew, Tag("crew")] - | Annotated[Agent, Tag("agent")], - Discriminator(_entity_discriminator), + Flow | Crew | Agent, # type: ignore[type-arg] + Field(discriminator="entity_type"), ] - RuntimeState = RootModel[list[Entity]] + + RuntimeState.model_rebuild( + force=True, + _types_namespace={**_full_namespace, "Entity": Entity}, + ) try: Agent.model_rebuild(force=True, _types_namespace=_full_namespace) @@ -205,6 +225,7 @@ __all__ = [ "BaseLLM", "Crew", "CrewOutput", + "Entity", 
"ExecutionContext", "Flow", "Knowledge", diff --git a/lib/crewai/src/crewai/agent/core.py b/lib/crewai/src/crewai/agent/core.py index 34250436f..66554c59d 100644 --- a/lib/crewai/src/crewai/agent/core.py +++ b/lib/crewai/src/crewai/agent/core.py @@ -27,7 +27,6 @@ from pydantic import ( BeforeValidator, ConfigDict, Field, - InstanceOf, PrivateAttr, model_validator, ) @@ -195,12 +194,12 @@ class Agent(BaseAgent): llm: Annotated[ str | BaseLLM | None, BeforeValidator(_validate_llm_ref), - PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"), + PlainSerializer(_serialize_llm_ref, return_type=dict | None, when_used="json"), ] = Field(description="Language model that will run the agent.", default=None) function_calling_llm: Annotated[ str | BaseLLM | None, BeforeValidator(_validate_llm_ref), - PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"), + PlainSerializer(_serialize_llm_ref, return_type=dict | None, when_used="json"), ] = Field(description="Language model that will run the agent.", default=None) system_template: str | None = Field( default=None, description="System format for the agent." @@ -297,8 +296,8 @@ class Agent(BaseAgent): Can be a single A2AConfig/A2AClientConfig/A2AServerConfig, or a list of any number of A2AConfig/A2AClientConfig with a single A2AServerConfig. """, ) - agent_executor: InstanceOf[CrewAgentExecutor] | InstanceOf[AgentExecutor] | None = ( - Field(default=None, description="An instance of the CrewAgentExecutor class.") + agent_executor: CrewAgentExecutor | AgentExecutor | None = Field( + default=None, description="An instance of the CrewAgentExecutor class." 
) executor_class: Annotated[ type[CrewAgentExecutor] | type[AgentExecutor], @@ -1011,10 +1010,10 @@ class Agent(BaseAgent): ) self.agent_executor = self.executor_class( llm=self.llm, - task=task, # type: ignore[arg-type] + task=task, i18n=self.i18n, agent=self, - crew=self.crew, # type: ignore[arg-type] + crew=self.crew, tools=parsed_tools, prompt=prompt, original_tools=raw_tools, @@ -1057,7 +1056,8 @@ class Agent(BaseAgent): if self.agent_executor is None: raise RuntimeError("Agent executor is not initialized.") - self.agent_executor.task = task + if task is not None: + self.agent_executor.task = task self.agent_executor.tools = tools self.agent_executor.original_tools = raw_tools self.agent_executor.prompt = prompt @@ -1076,7 +1076,7 @@ class Agent(BaseAgent): self.agent_executor.tools_handler = self.tools_handler self.agent_executor.request_within_rpm_limit = rpm_limit_fn - if self.agent_executor.llm: + if isinstance(self.agent_executor.llm, BaseLLM): existing_stop = getattr(self.agent_executor.llm, "stop", []) self.agent_executor.llm.stop = list( set( diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py index d71f27a2d..cfa08bbc3 100644 --- a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py +++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py @@ -14,8 +14,8 @@ from pydantic import ( BaseModel, BeforeValidator, Field, - InstanceOf, PrivateAttr, + SerializeAsAny, field_validator, model_validator, ) @@ -24,7 +24,7 @@ from pydantic_core import PydanticCustomError from typing_extensions import Self from crewai.agent.internal.meta import AgentMeta -from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin +from crewai.agents.agent_builder.base_agent_executor import BaseAgentExecutor from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess from crewai.agents.cache.cache_handler import CacheHandler from 
crewai.agents.tools_handler import ToolsHandler @@ -51,6 +51,7 @@ from crewai.utilities.string_utils import interpolate_only if TYPE_CHECKING: from crewai.context import ExecutionContext from crewai.crew import Crew + from crewai.state.provider.core import BaseProvider def _validate_crew_ref(value: Any) -> Any: @@ -63,7 +64,31 @@ def _serialize_crew_ref(value: Any) -> str | None: return str(value.id) if hasattr(value, "id") else str(value) +_LLM_TYPE_REGISTRY: dict[str, str] = { + "base": "crewai.llms.base_llm.BaseLLM", + "litellm": "crewai.llm.LLM", + "openai": "crewai.llms.providers.openai.completion.OpenAICompletion", + "anthropic": "crewai.llms.providers.anthropic.completion.AnthropicCompletion", + "azure": "crewai.llms.providers.azure.completion.AzureCompletion", + "bedrock": "crewai.llms.providers.bedrock.completion.BedrockCompletion", + "gemini": "crewai.llms.providers.gemini.completion.GeminiCompletion", +} + + def _validate_llm_ref(value: Any) -> Any: + if isinstance(value, dict): + import importlib + + llm_type = value.get("llm_type") + if not llm_type or llm_type not in _LLM_TYPE_REGISTRY: + raise ValueError( + f"Unknown or missing llm_type: {llm_type!r}. 
" + f"Expected one of {list(_LLM_TYPE_REGISTRY)}" + ) + dotted = _LLM_TYPE_REGISTRY[llm_type] + mod_path, cls_name = dotted.rsplit(".", 1) + cls = getattr(importlib.import_module(mod_path), cls_name) + return cls(**value) return value @@ -75,12 +100,37 @@ def _resolve_agent(value: Any, info: Any) -> Any: return Agent.model_validate(value, context=getattr(info, "context", None)) -def _serialize_llm_ref(value: Any) -> str | None: +_EXECUTOR_TYPE_REGISTRY: dict[str, str] = { + "base": "crewai.agents.agent_builder.base_agent_executor.BaseAgentExecutor", + "crew": "crewai.agents.crew_agent_executor.CrewAgentExecutor", + "experimental": "crewai.experimental.agent_executor.AgentExecutor", +} + + +def _validate_executor_ref(value: Any) -> Any: + if isinstance(value, dict): + import importlib + + executor_type = value.get("executor_type") + if not executor_type or executor_type not in _EXECUTOR_TYPE_REGISTRY: + raise ValueError( + f"Unknown or missing executor_type: {executor_type!r}. " + f"Expected one of {list(_EXECUTOR_TYPE_REGISTRY)}" + ) + dotted = _EXECUTOR_TYPE_REGISTRY[executor_type] + mod_path, cls_name = dotted.rsplit(".", 1) + cls = getattr(importlib.import_module(mod_path), cls_name) + return cls.model_validate(value) + return value + + +def _serialize_llm_ref(value: Any) -> dict[str, Any] | None: if value is None: return None if isinstance(value, str): - return value - return getattr(value, "model", str(value)) + return {"model": value} + result: dict[str, Any] = value.model_dump() + return result _SLUG_RE: Final[re.Pattern[str]] = re.compile( @@ -197,13 +247,19 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta): max_iter: int = Field( default=25, description="Maximum iterations for an agent to execute a task" ) - agent_executor: InstanceOf[CrewAgentExecutorMixin] | None = Field( + agent_executor: SerializeAsAny[BaseAgentExecutor] | None = Field( default=None, description="An instance of the CrewAgentExecutor class." 
) + + @field_validator("agent_executor", mode="before") + @classmethod + def _validate_agent_executor(cls, v: Any) -> Any: + return _validate_executor_ref(v) + llm: Annotated[ str | BaseLLM | None, BeforeValidator(_validate_llm_ref), - PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"), + PlainSerializer(_serialize_llm_ref, return_type=dict | None, when_used="json"), ] = Field(default=None, description="Language model that will run the agent.") crew: Annotated[ Crew | str | None, @@ -276,6 +332,30 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta): ) execution_context: ExecutionContext | None = Field(default=None) + @classmethod + def from_checkpoint( + cls, path: str, *, provider: BaseProvider | None = None + ) -> Self: + """Restore an Agent from a checkpoint file.""" + from crewai.context import apply_execution_context + from crewai.state.provider.json_provider import JsonProvider + from crewai.state.runtime import RuntimeState + + state = RuntimeState.from_checkpoint( + path, + provider=provider or JsonProvider(), + context={"from_checkpoint": True}, + ) + for entity in state.root: + if isinstance(entity, cls): + if entity.execution_context is not None: + apply_execution_context(entity.execution_context) + if entity.agent_executor is not None: + entity.agent_executor.agent = entity + entity.agent_executor._resuming = True + return entity + raise ValueError(f"No {cls.__name__} found in checkpoint: {path}") + @model_validator(mode="before") @classmethod def process_model_config(cls, values: Any) -> dict[str, Any]: diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor.py similarity index 70% rename from lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py rename to lib/crewai/src/crewai/agents/agent_builder/base_agent_executor.py index 6d01f1e27..ad56807e4 100644 --- 
a/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py +++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor.py @@ -2,37 +2,40 @@ from __future__ import annotations from typing import TYPE_CHECKING +from pydantic import BaseModel, Field, PrivateAttr + from crewai.agents.parser import AgentFinish from crewai.memory.utils import sanitize_scope_name from crewai.utilities.printer import Printer from crewai.utilities.string_utils import sanitize_tool_name +from crewai.utilities.types import LLMMessage if TYPE_CHECKING: - from crewai.agent import Agent + from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.crew import Crew from crewai.task import Task from crewai.utilities.i18n import I18N - from crewai.utilities.types import LLMMessage -class CrewAgentExecutorMixin: - crew: Crew | None - agent: Agent - task: Task | None - iterations: int - max_iter: int - messages: list[LLMMessage] - _i18n: I18N - _printer: Printer = Printer() +class BaseAgentExecutor(BaseModel): + model_config = {"arbitrary_types_allowed": True} + + executor_type: str = "base" + crew: Crew | None = Field(default=None, exclude=True) + agent: BaseAgent | None = Field(default=None, exclude=True) + task: Task | None = Field(default=None, exclude=True) + iterations: int = Field(default=0) + max_iter: int = Field(default=25) + messages: list[LLMMessage] = Field(default_factory=list) + _resuming: bool = PrivateAttr(default=False) + _i18n: I18N | None = PrivateAttr(default=None) + _printer: Printer = PrivateAttr(default_factory=Printer) def _save_to_memory(self, output: AgentFinish) -> None: - """Save task result to unified memory (memory or crew._memory). - - Extends the memory's root_scope with agent-specific path segment - (e.g., '/crew/research-crew/agent/researcher') so that agent memories - are scoped hierarchically under their crew. 
- """ + """Save task result to unified memory (memory or crew._memory).""" + if self.agent is None: + return memory = getattr(self.agent, "memory", None) or ( getattr(self.crew, "_memory", None) if self.crew else None ) @@ -49,11 +52,9 @@ class CrewAgentExecutorMixin: ) extracted = memory.extract_memories(raw) if extracted: - # Get the memory's existing root_scope base_root = getattr(memory, "root_scope", None) if isinstance(base_root, str) and base_root: - # Memory has a root_scope — extend it with agent info agent_role = self.agent.role or "unknown" sanitized_role = sanitize_scope_name(agent_role) agent_root = f"{base_root.rstrip('/')}/agent/{sanitized_role}" @@ -63,7 +64,6 @@ class CrewAgentExecutorMixin: extracted, agent_role=self.agent.role, root_scope=agent_root ) else: - # No base root_scope — don't inject one, preserve backward compat memory.remember_many(extracted, agent_role=self.agent.role) except Exception as e: self.agent._logger.log("error", f"Failed to save to memory: {e}") diff --git a/lib/crewai/src/crewai/agents/agent_builder/utilities/base_token_process.py b/lib/crewai/src/crewai/agents/agent_builder/utilities/base_token_process.py index 1fa46dd61..7f1b2cf0f 100644 --- a/lib/crewai/src/crewai/agents/agent_builder/utilities/base_token_process.py +++ b/lib/crewai/src/crewai/agents/agent_builder/utilities/base_token_process.py @@ -1,71 +1,34 @@ -"""Token usage tracking utilities. +"""Token usage tracking utilities.""" -This module provides utilities for tracking token consumption and request -metrics during agent execution. -""" +from pydantic import BaseModel, Field from crewai.types.usage_metrics import UsageMetrics -class TokenProcess: - """Track token usage during agent processing. +class TokenProcess(BaseModel): + """Track token usage during agent processing.""" - Attributes: - total_tokens: Total number of tokens used. - prompt_tokens: Number of tokens used in prompts. - cached_prompt_tokens: Number of cached prompt tokens used. 
- completion_tokens: Number of tokens used in completions. - successful_requests: Number of successful requests made. - """ - - def __init__(self) -> None: - """Initialize token tracking with zero values.""" - self.total_tokens: int = 0 - self.prompt_tokens: int = 0 - self.cached_prompt_tokens: int = 0 - self.completion_tokens: int = 0 - self.successful_requests: int = 0 + total_tokens: int = Field(default=0) + prompt_tokens: int = Field(default=0) + cached_prompt_tokens: int = Field(default=0) + completion_tokens: int = Field(default=0) + successful_requests: int = Field(default=0) def sum_prompt_tokens(self, tokens: int) -> None: - """Add prompt tokens to the running totals. - - Args: - tokens: Number of prompt tokens to add. - """ self.prompt_tokens += tokens self.total_tokens += tokens def sum_completion_tokens(self, tokens: int) -> None: - """Add completion tokens to the running totals. - - Args: - tokens: Number of completion tokens to add. - """ self.completion_tokens += tokens self.total_tokens += tokens def sum_cached_prompt_tokens(self, tokens: int) -> None: - """Add cached prompt tokens to the running total. - - Args: - tokens: Number of cached prompt tokens to add. - """ self.cached_prompt_tokens += tokens def sum_successful_requests(self, requests: int) -> None: - """Add successful requests to the running total. - - Args: - requests: Number of successful requests to add. - """ self.successful_requests += requests def get_summary(self) -> UsageMetrics: - """Get a summary of all tracked metrics. - - Returns: - UsageMetrics object with current totals. 
- """ return UsageMetrics( total_tokens=self.total_tokens, prompt_tokens=self.prompt_tokens, diff --git a/lib/crewai/src/crewai/agents/crew_agent_executor.py b/lib/crewai/src/crewai/agents/crew_agent_executor.py index 0707f59d6..0a002ed8e 100644 --- a/lib/crewai/src/crewai/agents/crew_agent_executor.py +++ b/lib/crewai/src/crewai/agents/crew_agent_executor.py @@ -1,3 +1,4 @@ +# mypy: disable-error-code="union-attr,arg-type" """Agent executor for crew AI agents. Handles agent execution flow including LLM interactions, tool execution, @@ -12,12 +13,20 @@ from concurrent.futures import ThreadPoolExecutor, as_completed import contextvars import inspect import logging -from typing import TYPE_CHECKING, Any, Literal, cast +from typing import TYPE_CHECKING, Annotated, Any, Literal, cast -from pydantic import BaseModel, GetCoreSchemaHandler, ValidationError -from pydantic_core import CoreSchema, core_schema +from pydantic import ( + AliasChoices, + BaseModel, + BeforeValidator, + ConfigDict, + Field, + ValidationError, +) +from pydantic.functional_serializers import PlainSerializer -from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin +from crewai.agents.agent_builder.base_agent import _serialize_llm_ref, _validate_llm_ref +from crewai.agents.agent_builder.base_agent_executor import BaseAgentExecutor from crewai.agents.parser import ( AgentAction, AgentFinish, @@ -38,6 +47,7 @@ from crewai.hooks.tool_hooks import ( get_after_tool_call_hooks, get_before_tool_call_hooks, ) +from crewai.types.callback import SerializableCallable from crewai.utilities.agent_utils import ( aget_llm_response, convert_tools_to_openai_schema, @@ -58,8 +68,8 @@ from crewai.utilities.agent_utils import ( from crewai.utilities.constants import TRAINING_DATA_FILE from crewai.utilities.file_store import aget_all_files, get_all_files from crewai.utilities.i18n import I18N, get_i18n -from crewai.utilities.printer import Printer from crewai.utilities.string_utils import 
sanitize_tool_name +from crewai.utilities.token_counter_callback import TokenCalcHandler from crewai.utilities.tool_utils import ( aexecute_tool_and_check_finality, execute_tool_and_check_finality, @@ -70,11 +80,8 @@ from crewai.utilities.training_handler import CrewTrainingHandler logger = logging.getLogger(__name__) if TYPE_CHECKING: - from crewai.agent import Agent from crewai.agents.tools_handler import ToolsHandler - from crewai.crew import Crew from crewai.llms.base_llm import BaseLLM - from crewai.task import Task from crewai.tools.base_tool import BaseTool from crewai.tools.structured_tool import CrewStructuredTool from crewai.tools.tool_types import ToolResult @@ -82,87 +89,59 @@ if TYPE_CHECKING: from crewai.utilities.types import LLMMessage -class CrewAgentExecutor(CrewAgentExecutorMixin): +class CrewAgentExecutor(BaseAgentExecutor): """Executor for crew agents. Manages the execution lifecycle of an agent including prompt formatting, LLM interactions, tool execution, and feedback handling. """ - def __init__( - self, - llm: BaseLLM, - task: Task, - crew: Crew, - agent: Agent, - prompt: SystemPromptResult | StandardPromptResult, - max_iter: int, - tools: list[CrewStructuredTool], - tools_names: str, - stop_words: list[str], - tools_description: str, - tools_handler: ToolsHandler, - step_callback: Any = None, - original_tools: list[BaseTool] | None = None, - function_calling_llm: BaseLLM | Any | None = None, - respect_context_window: bool = False, - request_within_rpm_limit: Callable[[], bool] | None = None, - callbacks: list[Any] | None = None, - response_model: type[BaseModel] | None = None, - i18n: I18N | None = None, - ) -> None: - """Initialize executor. 
+ executor_type: Literal["crew"] = "crew" + llm: Annotated[ + BaseLLM | str | None, + BeforeValidator(_validate_llm_ref), + PlainSerializer(_serialize_llm_ref, return_type=dict | None, when_used="json"), + ] = Field(default=None) + prompt: SystemPromptResult | StandardPromptResult | None = Field(default=None) + tools: list[CrewStructuredTool] = Field(default_factory=list) + tools_names: str = Field(default="") + stop: list[str] = Field( + default_factory=list, validation_alias=AliasChoices("stop", "stop_words") + ) + tools_description: str = Field(default="") + tools_handler: ToolsHandler | None = Field(default=None) + step_callback: SerializableCallable | None = Field(default=None, exclude=True) + original_tools: list[BaseTool] = Field(default_factory=list) + function_calling_llm: Annotated[ + BaseLLM | str | None, + BeforeValidator(_validate_llm_ref), + PlainSerializer(_serialize_llm_ref, return_type=dict | None, when_used="json"), + ] = Field(default=None) + respect_context_window: bool = Field(default=False) + request_within_rpm_limit: SerializableCallable | None = Field( + default=None, exclude=True + ) + callbacks: list[TokenCalcHandler] = Field(default_factory=list, exclude=True) + response_model: type[BaseModel] | None = Field(default=None, exclude=True) + ask_for_human_input: bool = Field(default=False) + log_error_after: int = Field(default=3) + before_llm_call_hooks: list[SerializableCallable] = Field( + default_factory=list, exclude=True + ) + after_llm_call_hooks: list[SerializableCallable] = Field( + default_factory=list, exclude=True + ) - Args: - llm: Language model instance. - task: Task to execute. - crew: Crew instance. - agent: Agent to execute. - prompt: Prompt templates. - max_iter: Maximum iterations. - tools: Available tools. - tools_names: Tool names string. - stop_words: Stop word list. - tools_description: Tool descriptions. - tools_handler: Tool handler instance. - step_callback: Optional step callback. 
- original_tools: Original tool list. - function_calling_llm: Optional function calling LLM. - respect_context_window: Respect context limits. - request_within_rpm_limit: RPM limit check function. - callbacks: Optional callbacks list. - response_model: Optional Pydantic model for structured outputs. - """ - self._i18n: I18N = i18n or get_i18n() - self.llm = llm - self.task = task - self.agent = agent - self.crew = crew - self.prompt = prompt - self.tools = tools - self.tools_names = tools_names - self.stop = stop_words - self.max_iter = max_iter - self.callbacks = callbacks or [] - self._printer: Printer = Printer() - self.tools_handler = tools_handler - self.original_tools = original_tools or [] - self.step_callback = step_callback - self.tools_description = tools_description - self.function_calling_llm = function_calling_llm - self.respect_context_window = respect_context_window - self.request_within_rpm_limit = request_within_rpm_limit - self.response_model = response_model - self.ask_for_human_input = False - self.messages: list[LLMMessage] = [] - self.iterations = 0 - self.log_error_after = 3 - self.before_llm_call_hooks: list[Callable[..., Any]] = [] - self.after_llm_call_hooks: list[Callable[..., Any]] = [] - self.before_llm_call_hooks.extend(get_before_llm_call_hooks()) - self.after_llm_call_hooks.extend(get_after_llm_call_hooks()) - if self.llm: - # This may be mutating the shared llm object and needs further evaluation + model_config = ConfigDict(arbitrary_types_allowed=True, populate_by_name=True) + + def __init__(self, i18n: I18N | None = None, **kwargs: Any) -> None: + super().__init__(**kwargs) + self._i18n = i18n or get_i18n() + if not self.before_llm_call_hooks: + self.before_llm_call_hooks.extend(get_before_llm_call_hooks()) + if not self.after_llm_call_hooks: + self.after_llm_call_hooks.extend(get_after_llm_call_hooks()) + if self.llm and not isinstance(self.llm, str): existing_stop = getattr(self.llm, "stop", []) self.llm.stop = list( set( @@ 
-179,7 +158,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): Returns: bool: True if tool should be used or not. """ - return self.llm.supports_stop_words() if self.llm else False + from crewai.llms.base_llm import BaseLLM + + return ( + self.llm.supports_stop_words() if isinstance(self.llm, BaseLLM) else False + ) def _setup_messages(self, inputs: dict[str, Any]) -> None: """Set up messages for the agent execution. @@ -191,7 +174,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): if provider.setup_messages(cast(ExecutorContext, cast(object, self))): return - if "system" in self.prompt: + if self.prompt is not None and "system" in self.prompt: system_prompt = self._format_prompt( cast(str, self.prompt.get("system", "")), inputs ) @@ -200,7 +183,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): ) self.messages.append(format_message_for_llm(system_prompt, role="system")) self.messages.append(format_message_for_llm(user_prompt)) - else: + elif self.prompt is not None: user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs) self.messages.append(format_message_for_llm(user_prompt)) @@ -215,9 +198,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): Returns: Dictionary with agent output. 
""" - self._setup_messages(inputs) - - self._inject_multimodal_files(inputs) + if self._resuming: + self._resuming = False + else: + self._setup_messages(inputs) + self._inject_multimodal_files(inputs) self._show_start_logs() @@ -344,7 +329,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): printer=self._printer, i18n=self._i18n, messages=self.messages, - llm=self.llm, + llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, verbose=self.agent.verbose, ) @@ -353,7 +338,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): enforce_rpm_limit(self.request_within_rpm_limit) answer = get_llm_response( - llm=self.llm, + llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, printer=self._printer, @@ -428,8 +413,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): formatted_answer, tool_result ) - self._invoke_step_callback(formatted_answer) # type: ignore[arg-type] - self._append_message(formatted_answer.text) # type: ignore[union-attr] + self._invoke_step_callback(formatted_answer) + self._append_message(formatted_answer.text) except OutputParserError as e: formatted_answer = handle_output_parser_exception( # type: ignore[assignment] @@ -450,7 +435,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): respect_context_window=self.respect_context_window, printer=self._printer, messages=self.messages, - llm=self.llm, + llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, i18n=self._i18n, verbose=self.agent.verbose, @@ -500,7 +485,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): printer=self._printer, i18n=self._i18n, messages=self.messages, - llm=self.llm, + llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, verbose=self.agent.verbose, ) @@ -514,7 +499,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): # without executing them. The executor handles tool execution # via _handle_native_tool_calls to properly manage message history. 
answer = get_llm_response( - llm=self.llm, + llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, printer=self._printer, @@ -587,7 +572,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): respect_context_window=self.respect_context_window, printer=self._printer, messages=self.messages, - llm=self.llm, + llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, i18n=self._i18n, verbose=self.agent.verbose, @@ -607,7 +592,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): enforce_rpm_limit(self.request_within_rpm_limit) answer = get_llm_response( - llm=self.llm, + llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, printer=self._printer, @@ -966,7 +951,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): before_hook_context = ToolCallHookContext( tool_name=func_name, tool_input=args_dict or {}, - tool=structured_tool, # type: ignore[arg-type] + tool=structured_tool, agent=self.agent, task=self.task, crew=self.crew, @@ -1031,7 +1016,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): after_hook_context = ToolCallHookContext( tool_name=func_name, tool_input=args_dict or {}, - tool=structured_tool, # type: ignore[arg-type] + tool=structured_tool, agent=self.agent, task=self.task, crew=self.crew, @@ -1119,9 +1104,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): Returns: Dictionary with agent output. 
""" - self._setup_messages(inputs) - - await self._ainject_multimodal_files(inputs) + if self._resuming: + self._resuming = False + else: + self._setup_messages(inputs) + await self._ainject_multimodal_files(inputs) self._show_start_logs() @@ -1184,7 +1171,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): printer=self._printer, i18n=self._i18n, messages=self.messages, - llm=self.llm, + llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, verbose=self.agent.verbose, ) @@ -1193,7 +1180,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): enforce_rpm_limit(self.request_within_rpm_limit) answer = await aget_llm_response( - llm=self.llm, + llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, printer=self._printer, @@ -1267,8 +1254,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): formatted_answer, tool_result ) - await self._ainvoke_step_callback(formatted_answer) # type: ignore[arg-type] - self._append_message(formatted_answer.text) # type: ignore[union-attr] + await self._ainvoke_step_callback(formatted_answer) + self._append_message(formatted_answer.text) except OutputParserError as e: formatted_answer = handle_output_parser_exception( # type: ignore[assignment] @@ -1288,7 +1275,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): respect_context_window=self.respect_context_window, printer=self._printer, messages=self.messages, - llm=self.llm, + llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, i18n=self._i18n, verbose=self.agent.verbose, @@ -1332,7 +1319,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): printer=self._printer, i18n=self._i18n, messages=self.messages, - llm=self.llm, + llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, verbose=self.agent.verbose, ) @@ -1346,7 +1333,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): # without executing them. The executor handles tool execution # via _handle_native_tool_calls to properly manage message history. 
answer = await aget_llm_response( - llm=self.llm, + llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, printer=self._printer, @@ -1418,7 +1405,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): respect_context_window=self.respect_context_window, printer=self._printer, messages=self.messages, - llm=self.llm, + llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, i18n=self._i18n, verbose=self.agent.verbose, @@ -1438,7 +1425,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): enforce_rpm_limit(self.request_within_rpm_limit) answer = await aget_llm_response( - llm=self.llm, + llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, printer=self._printer, @@ -1687,14 +1674,3 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): return format_message_for_llm( self._i18n.slice("feedback_instructions").format(feedback=feedback) ) - - @classmethod - def __get_pydantic_core_schema__( - cls, _source_type: Any, _handler: GetCoreSchemaHandler - ) -> CoreSchema: - """Generate Pydantic core schema for BaseClient Protocol. - - This allows the Protocol to be used in Pydantic models without - requiring arbitrary_types_allowed=True. 
- """ - return core_schema.any_schema() diff --git a/lib/crewai/src/crewai/agents/planner_observer.py b/lib/crewai/src/crewai/agents/planner_observer.py index 8be1c7368..16d1a747e 100644 --- a/lib/crewai/src/crewai/agents/planner_observer.py +++ b/lib/crewai/src/crewai/agents/planner_observer.py @@ -30,7 +30,7 @@ from crewai.utilities.types import LLMMessage if TYPE_CHECKING: - from crewai.agent import Agent + from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.task import Task logger = logging.getLogger(__name__) @@ -56,7 +56,7 @@ class PlannerObserver: def __init__( self, - agent: Agent, + agent: BaseAgent, task: Task | None = None, kickoff_input: str = "", ) -> None: diff --git a/lib/crewai/src/crewai/agents/step_executor.py b/lib/crewai/src/crewai/agents/step_executor.py index dad13afa2..29836497c 100644 --- a/lib/crewai/src/crewai/agents/step_executor.py +++ b/lib/crewai/src/crewai/agents/step_executor.py @@ -48,7 +48,7 @@ from crewai.utilities.types import LLMMessage if TYPE_CHECKING: - from crewai.agent import Agent + from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.tools_handler import ToolsHandler from crewai.crew import Crew from crewai.llms.base_llm import BaseLLM @@ -88,7 +88,7 @@ class StepExecutor: self, llm: BaseLLM, tools: list[CrewStructuredTool], - agent: Agent, + agent: BaseAgent, original_tools: list[BaseTool] | None = None, tools_handler: ToolsHandler | None = None, task: Task | None = None, diff --git a/lib/crewai/src/crewai/context.py b/lib/crewai/src/crewai/context.py index e6efe4349..10184ff39 100644 --- a/lib/crewai/src/crewai/context.py +++ b/lib/crewai/src/crewai/context.py @@ -90,7 +90,7 @@ class ExecutionContext(BaseModel): flow_id: str | None = Field(default=None) flow_method_name: str = Field(default="unknown") - event_id_stack: tuple[tuple[str, str], ...] = Field(default=()) + event_id_stack: tuple[tuple[str, str], ...] 
= Field(default_factory=tuple) last_event_id: str | None = Field(default=None) triggering_event_id: str | None = Field(default=None) emission_sequence: int = Field(default=0) diff --git a/lib/crewai/src/crewai/crew.py b/lib/crewai/src/crewai/crew.py index bd84f3067..2e7964fb1 100644 --- a/lib/crewai/src/crewai/crew.py +++ b/lib/crewai/src/crewai/crew.py @@ -42,6 +42,7 @@ if TYPE_CHECKING: from opentelemetry.trace import Span from crewai.context import ExecutionContext + from crewai.state.provider.core import BaseProvider try: from crewai_files import get_supported_content_types @@ -234,7 +235,7 @@ class Crew(FlowTrackable, BaseModel): manager_llm: Annotated[ str | BaseLLM | None, BeforeValidator(_validate_llm_ref), - PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"), + PlainSerializer(_serialize_llm_ref, return_type=dict | None, when_used="json"), ] = Field(description="Language model that will run the agent.", default=None) manager_agent: Annotated[ BaseAgent | None, @@ -243,7 +244,7 @@ class Crew(FlowTrackable, BaseModel): function_calling_llm: Annotated[ str | LLM | None, BeforeValidator(_validate_llm_ref), - PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"), + PlainSerializer(_serialize_llm_ref, return_type=dict | None, when_used="json"), ] = Field(description="Language model that will run the agent.", default=None) config: Json[dict[str, Any]] | dict[str, Any] | None = Field(default=None) id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True) @@ -296,7 +297,7 @@ class Crew(FlowTrackable, BaseModel): planning_llm: Annotated[ str | BaseLLM | None, BeforeValidator(_validate_llm_ref), - PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"), + PlainSerializer(_serialize_llm_ref, return_type=dict | None, when_used="json"), ] = Field( default=None, description=( @@ -321,7 +322,7 @@ class Crew(FlowTrackable, BaseModel): chat_llm: Annotated[ str | BaseLLM | None, 
BeforeValidator(_validate_llm_ref), - PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"), + PlainSerializer(_serialize_llm_ref, return_type=dict | None, when_used="json"), ] = Field( default=None, description="LLM used to handle chatting with the crew.", @@ -353,6 +354,113 @@ class Crew(FlowTrackable, BaseModel): checkpoint_train: bool | None = Field(default=None) checkpoint_kickoff_event_id: str | None = Field(default=None) + @classmethod + def from_checkpoint( + cls, path: str, *, provider: BaseProvider | None = None + ) -> Crew: + """Restore a Crew from a checkpoint file, ready to resume via kickoff(). + + Args: + path: Path to a checkpoint JSON file. + provider: Storage backend to read from. Defaults to JsonProvider. + + Returns: + A Crew instance. Call kickoff() to resume from the last completed task. + """ + from crewai.context import apply_execution_context + from crewai.events.event_bus import crewai_event_bus + from crewai.state.provider.json_provider import JsonProvider + from crewai.state.runtime import RuntimeState + + state = RuntimeState.from_checkpoint( + path, + provider=provider or JsonProvider(), + context={"from_checkpoint": True}, + ) + crewai_event_bus.set_runtime_state(state) + for entity in state.root: + if isinstance(entity, cls): + if entity.execution_context is not None: + apply_execution_context(entity.execution_context) + entity._restore_runtime() + return entity + raise ValueError(f"No Crew found in checkpoint: {path}") + + def _restore_runtime(self) -> None: + """Re-create runtime objects after restoring from a checkpoint.""" + for agent in self.agents: + agent.crew = self + executor = agent.agent_executor + if executor and executor.messages: + executor.crew = self + executor.agent = agent + executor._resuming = True + else: + agent.agent_executor = None + for task in self.tasks: + if task.agent is not None: + for agent in self.agents: + if agent.role == task.agent.role: + task.agent = agent + if 
agent.agent_executor is not None and task.output is None: + agent.agent_executor.task = task + break + if self.checkpoint_inputs is not None: + self._inputs = self.checkpoint_inputs + if self.checkpoint_kickoff_event_id is not None: + self._kickoff_event_id = self.checkpoint_kickoff_event_id + if self.checkpoint_train is not None: + self._train = self.checkpoint_train + + self._restore_event_scope() + + def _restore_event_scope(self) -> None: + """Rebuild the event scope stack from the checkpoint's event record.""" + from crewai.events.base_events import set_emission_counter + from crewai.events.event_bus import crewai_event_bus + from crewai.events.event_context import ( + restore_event_scope, + set_last_event_id, + ) + + state = crewai_event_bus._runtime_state + if state is None: + return + + # Restore crew scope and the in-progress task scope. Inner scopes + # (agent, llm, tool) are re-created by the executor on resume. + stack: list[tuple[str, str]] = [] + if self._kickoff_event_id: + stack.append((self._kickoff_event_id, "crew_kickoff_started")) + + # Find the task_started event for the in-progress task (skipped on resume) + for task in self.tasks: + if task.output is None: + task_id_str = str(task.id) + for node in state.event_record.nodes.values(): + if ( + node.event.type == "task_started" + and node.event.task_id == task_id_str + ): + stack.append((node.event.event_id, "task_started")) + break + break + + restore_event_scope(tuple(stack)) + + # Restore last_event_id and emission counter from the record + last_event_id: str | None = None + max_seq = 0 + for node in state.event_record.nodes.values(): + seq = node.event.emission_sequence or 0 + if seq > max_seq: + max_seq = seq + last_event_id = node.event.event_id + if last_event_id is not None: + set_last_event_id(last_event_id) + if max_seq > 0: + set_emission_counter(max_seq) + @field_validator("id", mode="before") @classmethod def _deny_user_set_id(cls, v: UUID4 | None, info: Any) -> UUID4 | None: @@ 
-381,7 +489,8 @@ class Crew(FlowTrackable, BaseModel): @model_validator(mode="after") def set_private_attrs(self) -> Crew: """set private attributes.""" - self._cache_handler = CacheHandler() + if not getattr(self, "_cache_handler", None): + self._cache_handler = CacheHandler() event_listener = EventListener() # Determine and set tracing state once for this execution @@ -1055,6 +1164,10 @@ class Crew(FlowTrackable, BaseModel): Returns: CrewOutput: Final output of the crew """ + custom_start = self._get_execution_start_index(tasks) + if custom_start is not None: + start_index = custom_start + task_outputs: list[TaskOutput] = [] pending_tasks: list[tuple[Task, asyncio.Task[TaskOutput], int]] = [] last_sync_output: TaskOutput | None = None @@ -1236,7 +1349,12 @@ class Crew(FlowTrackable, BaseModel): manager.crew = self def _get_execution_start_index(self, tasks: list[Task]) -> int | None: - return None + if self.checkpoint_kickoff_event_id is None: + return None + for i, task in enumerate(tasks): + if task.output is None: + return i + return len(tasks) if tasks else None def _execute_tasks( self, diff --git a/lib/crewai/src/crewai/crews/utils.py b/lib/crewai/src/crewai/crews/utils.py index 2b62240d2..4077a9a19 100644 --- a/lib/crewai/src/crewai/crews/utils.py +++ b/lib/crewai/src/crewai/crews/utils.py @@ -105,6 +105,9 @@ def setup_agents( agent.function_calling_llm = function_calling_llm # type: ignore[attr-defined] if not agent.step_callback: # type: ignore[attr-defined] agent.step_callback = step_callback # type: ignore[attr-defined] + executor = getattr(agent, "agent_executor", None) + if executor and getattr(executor, "_resuming", False): + continue agent.create_agent_executor() @@ -157,10 +160,8 @@ def prepare_task_execution( # Handle replay skip if start_index is not None and task_index < start_index: if task.output: - if task.async_execution: - task_outputs.append(task.output) - else: - task_outputs = [task.output] + task_outputs.append(task.output) + if not 
task.async_execution: last_sync_output = task.output return ( TaskExecutionData(agent=None, tools=[], should_skip=True), @@ -183,7 +184,9 @@ def prepare_task_execution( tools_for_task, ) - crew._log_task_start(task, agent_to_use.role) + executor = agent_to_use.agent_executor + if not (executor and executor._resuming): + crew._log_task_start(task, agent_to_use.role) return ( TaskExecutionData(agent=agent_to_use, tools=tools_for_task), @@ -275,10 +278,15 @@ def prepare_kickoff( """ from crewai.events.base_events import reset_emission_counter from crewai.events.event_bus import crewai_event_bus - from crewai.events.event_context import get_current_parent_id, reset_last_event_id + from crewai.events.event_context import ( + get_current_parent_id, + reset_last_event_id, + ) from crewai.events.types.crew_events import CrewKickoffStartedEvent - if get_current_parent_id() is None: + resuming = crew.checkpoint_kickoff_event_id is not None + + if not resuming and get_current_parent_id() is None: reset_emission_counter() reset_last_event_id() @@ -296,14 +304,29 @@ def prepare_kickoff( normalized = {} normalized = before_callback(normalized) - started_event = CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized) - crew._kickoff_event_id = started_event.event_id - future = crewai_event_bus.emit(crew, started_event) - if future is not None: - try: - future.result() - except Exception: # noqa: S110 - pass + if resuming and crew._kickoff_event_id: + if crew.verbose: + from crewai.events.utils.console_formatter import ConsoleFormatter + + fmt = ConsoleFormatter(verbose=True) + content = fmt.create_status_content( + "Resuming from Checkpoint", + crew.name or "Crew", + "bright_magenta", + ID=str(crew.id), + ) + fmt.print_panel( + content, "\U0001f504 Resuming from Checkpoint", "bright_magenta" + ) + else: + started_event = CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized) + crew._kickoff_event_id = started_event.event_id + future = crewai_event_bus.emit(crew, 
started_event) + if future is not None: + try: + future.result() + except Exception: # noqa: S110 + pass crew._task_output_handler.reset() crew._logging_color = "bold_purple" diff --git a/lib/crewai/src/crewai/events/event_bus.py b/lib/crewai/src/crewai/events/event_bus.py index eefe1ad88..c2a2956a7 100644 --- a/lib/crewai/src/crewai/events/event_bus.py +++ b/lib/crewai/src/crewai/events/event_bus.py @@ -5,17 +5,24 @@ of events throughout the CrewAI system, supporting both synchronous and asynchro event handlers with optional dependency management. """ +from __future__ import annotations + import asyncio import atexit from collections.abc import Callable, Generator from concurrent.futures import Future, ThreadPoolExecutor from contextlib import contextmanager import contextvars +import logging import threading -from typing import Any, Final, ParamSpec, TypeVar +from typing import TYPE_CHECKING, Any, Final, ParamSpec, TypeVar from typing_extensions import Self + +if TYPE_CHECKING: + from crewai.state.runtime import RuntimeState + from crewai.events.base_events import BaseEvent, get_next_emission_sequence from crewai.events.depends import Depends from crewai.events.event_context import ( @@ -43,10 +50,16 @@ from crewai.events.types.event_bus_types import ( ) from crewai.events.types.llm_events import LLMStreamChunkEvent from crewai.events.utils.console_formatter import ConsoleFormatter -from crewai.events.utils.handlers import is_async_handler, is_call_handler_safe +from crewai.events.utils.handlers import ( + _get_param_count, + is_async_handler, + is_call_handler_safe, +) from crewai.utilities.rw_lock import RWLock +logger = logging.getLogger(__name__) + P = ParamSpec("P") R = TypeVar("R") @@ -87,6 +100,7 @@ class CrewAIEventsBus: _futures_lock: threading.Lock _executor_initialized: bool _has_pending_events: bool + _runtime_state: RuntimeState | None def __new__(cls) -> Self: """Create or return the singleton instance. 
@@ -122,6 +136,8 @@ class CrewAIEventsBus: # Lazy initialization flags - executor and loop created on first emit self._executor_initialized = False self._has_pending_events = False + self._runtime_state: RuntimeState | None = None + self._registered_entity_ids: set[int] = set() def _ensure_executor_initialized(self) -> None: """Lazily initialize the thread pool executor and event loop. @@ -209,25 +225,16 @@ class CrewAIEventsBus: ) -> Callable[[Callable[P, R]], Callable[P, R]]: """Decorator to register an event handler for a specific event type. + Handlers can accept 2 or 3 arguments: + - ``(source, event)`` — standard handler + - ``(source, event, state: RuntimeState)`` — handler with runtime state + Args: event_type: The event class to listen for - depends_on: Optional dependency or list of dependencies. Handlers with - dependencies will execute after their dependencies complete. + depends_on: Optional dependency or list of dependencies. Returns: Decorator function that registers the handler - - Example: - >>> from crewai.events import crewai_event_bus, Depends - >>> from crewai.events.types.llm_events import LLMCallStartedEvent - >>> - >>> @crewai_event_bus.on(LLMCallStartedEvent) - >>> def setup_context(source, event): - ... print("Setting up context") - >>> - >>> @crewai_event_bus.on(LLMCallStartedEvent, depends_on=Depends(setup_context)) - >>> def process(source, event): - ... print("Processing (runs after setup_context)") """ def decorator(handler: Callable[P, R]) -> Callable[P, R]: @@ -248,6 +255,42 @@ class CrewAIEventsBus: return decorator + def set_runtime_state(self, state: RuntimeState) -> None: + """Set the RuntimeState that will be passed to event handlers.""" + with self._instance_lock: + self._runtime_state = state + self._registered_entity_ids = {id(e) for e in state.root} + + def register_entity(self, entity: Any) -> None: + """Add an entity to the RuntimeState, creating it if needed. 
+ + Agents that belong to an already-registered Crew are tracked + but not appended to root, since they are serialized as part + of the Crew's agents list. + """ + eid = id(entity) + if eid in self._registered_entity_ids: + return + with self._instance_lock: + if eid in self._registered_entity_ids: + return + self._registered_entity_ids.add(eid) + if getattr(entity, "entity_type", None) == "agent": + crew = getattr(entity, "crew", None) + if crew is not None and id(crew) in self._registered_entity_ids: + return + if self._runtime_state is None: + from crewai import RuntimeState + + if RuntimeState is None: + logger.warning( + "RuntimeState unavailable; skipping entity registration." + ) + return + self._runtime_state = RuntimeState(root=[entity]) + else: + self._runtime_state.root.append(entity) + def off( self, event_type: type[BaseEvent], @@ -294,10 +337,12 @@ class CrewAIEventsBus: event: The event instance handlers: Frozenset of sync handlers to call """ + state = self._runtime_state errors: list[tuple[SyncHandler, Exception]] = [ (handler, error) for handler in handlers - if (error := is_call_handler_safe(handler, source, event)) is not None + if (error := is_call_handler_safe(handler, source, event, state)) + is not None ] if errors: @@ -319,7 +364,14 @@ class CrewAIEventsBus: event: The event instance handlers: Frozenset of async handlers to call """ - coros = [handler(source, event) for handler in handlers] + state = self._runtime_state + + async def _call(handler: AsyncHandler) -> Any: + if _get_param_count(handler) >= 3: + return await handler(source, event, state) # type: ignore[call-arg] + return await handler(source, event) # type: ignore[call-arg] + + coros = [_call(handler) for handler in handlers] results = await asyncio.gather(*coros, return_exceptions=True) for handler, result in zip(handlers, results, strict=False): if isinstance(result, Exception): @@ -391,6 +443,53 @@ class CrewAIEventsBus: if level_async: await self._acall_handlers(source, 
event, level_async) + def _register_source(self, source: Any) -> None: + """Register the source entity in RuntimeState if applicable.""" + if ( + getattr(source, "entity_type", None) in ("flow", "crew", "agent") + and id(source) not in self._registered_entity_ids + ): + self.register_entity(source) + + def _record_event(self, event: BaseEvent) -> None: + """Add an event to the RuntimeState event record.""" + if self._runtime_state is not None: + self._runtime_state.event_record.add(event) + + def _prepare_event(self, source: Any, event: BaseEvent) -> None: + """Register source, set scope/sequence metadata, and record the event. + + This method mutates ContextVar state (scope stack, last_event_id) + and must only be called from synchronous emit paths. + """ + self._register_source(source) + + event.previous_event_id = get_last_event_id() + event.triggered_by_event_id = get_triggering_event_id() + event.emission_sequence = get_next_emission_sequence() + if event.parent_event_id is None: + event_type_name = event.type + if event_type_name in SCOPE_ENDING_EVENTS: + event.parent_event_id = get_enclosing_parent_id() + popped = pop_event_scope() + if popped is None: + handle_empty_pop(event_type_name) + else: + popped_event_id, popped_type = popped + event.started_event_id = popped_event_id + expected_start = VALID_EVENT_PAIRS.get(event_type_name) + if expected_start and popped_type and popped_type != expected_start: + handle_mismatch(event_type_name, popped_type, expected_start) + elif event_type_name in SCOPE_STARTING_EVENTS: + event.parent_event_id = get_current_parent_id() + push_event_scope(event.event_id, event_type_name) + else: + event.parent_event_id = get_current_parent_id() + + set_last_event_id(event.event_id) + + self._record_event(event) + def emit(self, source: Any, event: BaseEvent) -> Future[None] | None: """Emit an event to all registered handlers. @@ -417,29 +516,8 @@ class CrewAIEventsBus: ... await asyncio.wrap_future(future) # In async test ... 
# or future.result(timeout=5.0) in sync code """ - event.previous_event_id = get_last_event_id() - event.triggered_by_event_id = get_triggering_event_id() - event.emission_sequence = get_next_emission_sequence() - if event.parent_event_id is None: - event_type_name = event.type - if event_type_name in SCOPE_ENDING_EVENTS: - event.parent_event_id = get_enclosing_parent_id() - popped = pop_event_scope() - if popped is None: - handle_empty_pop(event_type_name) - else: - popped_event_id, popped_type = popped - event.started_event_id = popped_event_id - expected_start = VALID_EVENT_PAIRS.get(event_type_name) - if expected_start and popped_type and popped_type != expected_start: - handle_mismatch(event_type_name, popped_type, expected_start) - elif event_type_name in SCOPE_STARTING_EVENTS: - event.parent_event_id = get_current_parent_id() - push_event_scope(event.event_id, event_type_name) - else: - event.parent_event_id = get_current_parent_id() + self._prepare_event(source, event) - set_last_event_id(event.event_id) event_type = type(event) with self._rwlock.r_locked(): @@ -538,6 +616,10 @@ class CrewAIEventsBus: source: The object emitting the event event: The event instance to emit """ + self._register_source(source) + event.emission_sequence = get_next_emission_sequence() + self._record_event(event) + event_type = type(event) with self._rwlock.r_locked(): diff --git a/lib/crewai/src/crewai/events/event_context.py b/lib/crewai/src/crewai/events/event_context.py index 672daf786..bcb3de1a2 100644 --- a/lib/crewai/src/crewai/events/event_context.py +++ b/lib/crewai/src/crewai/events/event_context.py @@ -133,6 +133,11 @@ def triggered_by_scope(event_id: str) -> Generator[None, None, None]: _triggering_event_id.set(previous) +def restore_event_scope(stack: tuple[tuple[str, str], ...]) -> None: + """Restore the event scope stack from a checkpoint.""" + _event_id_stack.set(stack) + + def push_event_scope(event_id: str, event_type: str = "") -> None: """Push an event ID and 
type onto the scope stack.""" config = _event_context_config.get() or _default_config diff --git a/lib/crewai/src/crewai/events/types/a2a_events.py b/lib/crewai/src/crewai/events/types/a2a_events.py index 55de064f8..4131a1fea 100644 --- a/lib/crewai/src/crewai/events/types/a2a_events.py +++ b/lib/crewai/src/crewai/events/types/a2a_events.py @@ -73,7 +73,7 @@ class A2ADelegationStartedEvent(A2AEventBase): extensions: List of A2A extension URIs in use. """ - type: str = "a2a_delegation_started" + type: Literal["a2a_delegation_started"] = "a2a_delegation_started" endpoint: str task_description: str agent_id: str @@ -106,7 +106,7 @@ class A2ADelegationCompletedEvent(A2AEventBase): extensions: List of A2A extension URIs in use. """ - type: str = "a2a_delegation_completed" + type: Literal["a2a_delegation_completed"] = "a2a_delegation_completed" status: str result: str | None = None error: str | None = None @@ -140,7 +140,7 @@ class A2AConversationStartedEvent(A2AEventBase): extensions: List of A2A extension URIs in use. """ - type: str = "a2a_conversation_started" + type: Literal["a2a_conversation_started"] = "a2a_conversation_started" agent_id: str endpoint: str context_id: str | None = None @@ -171,7 +171,7 @@ class A2AMessageSentEvent(A2AEventBase): extensions: List of A2A extension URIs in use. """ - type: str = "a2a_message_sent" + type: Literal["a2a_message_sent"] = "a2a_message_sent" message: str turn_number: int context_id: str | None = None @@ -203,7 +203,7 @@ class A2AResponseReceivedEvent(A2AEventBase): extensions: List of A2A extension URIs in use. """ - type: str = "a2a_response_received" + type: Literal["a2a_response_received"] = "a2a_response_received" response: str turn_number: int context_id: str | None = None @@ -237,7 +237,7 @@ class A2AConversationCompletedEvent(A2AEventBase): extensions: List of A2A extension URIs in use. 
""" - type: str = "a2a_conversation_completed" + type: Literal["a2a_conversation_completed"] = "a2a_conversation_completed" status: Literal["completed", "failed"] final_result: str | None = None error: str | None = None @@ -263,7 +263,7 @@ class A2APollingStartedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_polling_started" + type: Literal["a2a_polling_started"] = "a2a_polling_started" task_id: str context_id: str | None = None polling_interval: float @@ -286,7 +286,7 @@ class A2APollingStatusEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_polling_status" + type: Literal["a2a_polling_status"] = "a2a_polling_status" task_id: str context_id: str | None = None state: str @@ -309,7 +309,9 @@ class A2APushNotificationRegisteredEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_push_notification_registered" + type: Literal["a2a_push_notification_registered"] = ( + "a2a_push_notification_registered" + ) task_id: str context_id: str | None = None callback_url: str @@ -334,7 +336,7 @@ class A2APushNotificationReceivedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_push_notification_received" + type: Literal["a2a_push_notification_received"] = "a2a_push_notification_received" task_id: str context_id: str | None = None state: str @@ -359,7 +361,7 @@ class A2APushNotificationSentEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_push_notification_sent" + type: Literal["a2a_push_notification_sent"] = "a2a_push_notification_sent" task_id: str context_id: str | None = None callback_url: str @@ -381,7 +383,7 @@ class A2APushNotificationTimeoutEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. 
""" - type: str = "a2a_push_notification_timeout" + type: Literal["a2a_push_notification_timeout"] = "a2a_push_notification_timeout" task_id: str context_id: str | None = None timeout_seconds: float @@ -405,7 +407,7 @@ class A2AStreamingStartedEvent(A2AEventBase): extensions: List of A2A extension URIs in use. """ - type: str = "a2a_streaming_started" + type: Literal["a2a_streaming_started"] = "a2a_streaming_started" task_id: str | None = None context_id: str | None = None endpoint: str @@ -434,7 +436,7 @@ class A2AStreamingChunkEvent(A2AEventBase): extensions: List of A2A extension URIs in use. """ - type: str = "a2a_streaming_chunk" + type: Literal["a2a_streaming_chunk"] = "a2a_streaming_chunk" task_id: str | None = None context_id: str | None = None chunk: str @@ -462,7 +464,7 @@ class A2AAgentCardFetchedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_agent_card_fetched" + type: Literal["a2a_agent_card_fetched"] = "a2a_agent_card_fetched" endpoint: str a2a_agent_name: str | None = None agent_card: dict[str, Any] | None = None @@ -486,7 +488,7 @@ class A2AAuthenticationFailedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_authentication_failed" + type: Literal["a2a_authentication_failed"] = "a2a_authentication_failed" endpoint: str auth_type: str | None = None error: str @@ -517,7 +519,7 @@ class A2AArtifactReceivedEvent(A2AEventBase): extensions: List of A2A extension URIs in use. """ - type: str = "a2a_artifact_received" + type: Literal["a2a_artifact_received"] = "a2a_artifact_received" task_id: str artifact_id: str artifact_name: str | None = None @@ -550,7 +552,7 @@ class A2AConnectionErrorEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. 
""" - type: str = "a2a_connection_error" + type: Literal["a2a_connection_error"] = "a2a_connection_error" endpoint: str error: str error_type: str | None = None @@ -571,7 +573,7 @@ class A2AServerTaskStartedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_server_task_started" + type: Literal["a2a_server_task_started"] = "a2a_server_task_started" task_id: str context_id: str metadata: dict[str, Any] | None = None @@ -587,7 +589,7 @@ class A2AServerTaskCompletedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_server_task_completed" + type: Literal["a2a_server_task_completed"] = "a2a_server_task_completed" task_id: str context_id: str result: str @@ -603,7 +605,7 @@ class A2AServerTaskCanceledEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_server_task_canceled" + type: Literal["a2a_server_task_canceled"] = "a2a_server_task_canceled" task_id: str context_id: str metadata: dict[str, Any] | None = None @@ -619,7 +621,7 @@ class A2AServerTaskFailedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_server_task_failed" + type: Literal["a2a_server_task_failed"] = "a2a_server_task_failed" task_id: str context_id: str error: str @@ -634,7 +636,7 @@ class A2AParallelDelegationStartedEvent(A2AEventBase): task_description: Description of the task being delegated. """ - type: str = "a2a_parallel_delegation_started" + type: Literal["a2a_parallel_delegation_started"] = "a2a_parallel_delegation_started" endpoints: list[str] task_description: str @@ -649,7 +651,9 @@ class A2AParallelDelegationCompletedEvent(A2AEventBase): results: Summary of results from each agent. 
""" - type: str = "a2a_parallel_delegation_completed" + type: Literal["a2a_parallel_delegation_completed"] = ( + "a2a_parallel_delegation_completed" + ) endpoints: list[str] success_count: int failure_count: int @@ -675,7 +679,7 @@ class A2ATransportNegotiatedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_transport_negotiated" + type: Literal["a2a_transport_negotiated"] = "a2a_transport_negotiated" endpoint: str a2a_agent_name: str | None = None negotiated_transport: str @@ -708,7 +712,7 @@ class A2AContentTypeNegotiatedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_content_type_negotiated" + type: Literal["a2a_content_type_negotiated"] = "a2a_content_type_negotiated" endpoint: str a2a_agent_name: str | None = None skill_name: str | None = None @@ -738,7 +742,7 @@ class A2AContextCreatedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_context_created" + type: Literal["a2a_context_created"] = "a2a_context_created" context_id: str created_at: float metadata: dict[str, Any] | None = None @@ -755,7 +759,7 @@ class A2AContextExpiredEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_context_expired" + type: Literal["a2a_context_expired"] = "a2a_context_expired" context_id: str created_at: float age_seconds: float @@ -775,7 +779,7 @@ class A2AContextIdleEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_context_idle" + type: Literal["a2a_context_idle"] = "a2a_context_idle" context_id: str idle_seconds: float task_count: int @@ -792,7 +796,7 @@ class A2AContextCompletedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. 
""" - type: str = "a2a_context_completed" + type: Literal["a2a_context_completed"] = "a2a_context_completed" context_id: str total_tasks: int duration_seconds: float @@ -811,7 +815,7 @@ class A2AContextPrunedEvent(A2AEventBase): metadata: Custom A2A metadata key-value pairs. """ - type: str = "a2a_context_pruned" + type: Literal["a2a_context_pruned"] = "a2a_context_pruned" context_id: str task_count: int age_seconds: float diff --git a/lib/crewai/src/crewai/events/types/agent_events.py b/lib/crewai/src/crewai/events/types/agent_events.py index 49e24e059..8c811d176 100644 --- a/lib/crewai/src/crewai/events/types/agent_events.py +++ b/lib/crewai/src/crewai/events/types/agent_events.py @@ -3,7 +3,7 @@ from __future__ import annotations from collections.abc import Sequence -from typing import Any +from typing import Any, Literal from pydantic import ConfigDict, model_validator from typing_extensions import Self @@ -21,7 +21,7 @@ class AgentExecutionStartedEvent(BaseEvent): task: Any tools: Sequence[BaseTool | CrewStructuredTool] | None task_prompt: str - type: str = "agent_execution_started" + type: Literal["agent_execution_started"] = "agent_execution_started" model_config = ConfigDict(arbitrary_types_allowed=True) @@ -38,7 +38,7 @@ class AgentExecutionCompletedEvent(BaseEvent): agent: BaseAgent task: Any output: str - type: str = "agent_execution_completed" + type: Literal["agent_execution_completed"] = "agent_execution_completed" model_config = ConfigDict(arbitrary_types_allowed=True) @@ -55,7 +55,7 @@ class AgentExecutionErrorEvent(BaseEvent): agent: BaseAgent task: Any error: str - type: str = "agent_execution_error" + type: Literal["agent_execution_error"] = "agent_execution_error" model_config = ConfigDict(arbitrary_types_allowed=True) @@ -73,7 +73,7 @@ class LiteAgentExecutionStartedEvent(BaseEvent): agent_info: dict[str, Any] tools: Sequence[BaseTool | CrewStructuredTool] | None messages: str | list[dict[str, str]] - type: str = "lite_agent_execution_started" 
+ type: Literal["lite_agent_execution_started"] = "lite_agent_execution_started" model_config = ConfigDict(arbitrary_types_allowed=True) @@ -83,7 +83,7 @@ class LiteAgentExecutionCompletedEvent(BaseEvent): agent_info: dict[str, Any] output: str - type: str = "lite_agent_execution_completed" + type: Literal["lite_agent_execution_completed"] = "lite_agent_execution_completed" class LiteAgentExecutionErrorEvent(BaseEvent): @@ -91,7 +91,7 @@ class LiteAgentExecutionErrorEvent(BaseEvent): agent_info: dict[str, Any] error: str - type: str = "lite_agent_execution_error" + type: Literal["lite_agent_execution_error"] = "lite_agent_execution_error" # Agent Eval events @@ -100,7 +100,7 @@ class AgentEvaluationStartedEvent(BaseEvent): agent_role: str task_id: str | None = None iteration: int - type: str = "agent_evaluation_started" + type: Literal["agent_evaluation_started"] = "agent_evaluation_started" class AgentEvaluationCompletedEvent(BaseEvent): @@ -110,7 +110,7 @@ class AgentEvaluationCompletedEvent(BaseEvent): iteration: int metric_category: Any score: Any - type: str = "agent_evaluation_completed" + type: Literal["agent_evaluation_completed"] = "agent_evaluation_completed" class AgentEvaluationFailedEvent(BaseEvent): @@ -119,7 +119,7 @@ class AgentEvaluationFailedEvent(BaseEvent): task_id: str | None = None iteration: int error: str - type: str = "agent_evaluation_failed" + type: Literal["agent_evaluation_failed"] = "agent_evaluation_failed" def _set_agent_fingerprint(event: BaseEvent, agent: BaseAgent) -> None: diff --git a/lib/crewai/src/crewai/events/types/crew_events.py b/lib/crewai/src/crewai/events/types/crew_events.py index fa198f5ae..cf71cbfe3 100644 --- a/lib/crewai/src/crewai/events/types/crew_events.py +++ b/lib/crewai/src/crewai/events/types/crew_events.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, Literal from crewai.events.base_events import BaseEvent @@ -37,14 +37,14 @@ class 
CrewKickoffStartedEvent(CrewBaseEvent): """Event emitted when a crew starts execution""" inputs: dict[str, Any] | None - type: str = "crew_kickoff_started" + type: Literal["crew_kickoff_started"] = "crew_kickoff_started" class CrewKickoffCompletedEvent(CrewBaseEvent): """Event emitted when a crew completes execution""" output: Any - type: str = "crew_kickoff_completed" + type: Literal["crew_kickoff_completed"] = "crew_kickoff_completed" total_tokens: int = 0 @@ -52,7 +52,7 @@ class CrewKickoffFailedEvent(CrewBaseEvent): """Event emitted when a crew fails to complete execution""" error: str - type: str = "crew_kickoff_failed" + type: Literal["crew_kickoff_failed"] = "crew_kickoff_failed" class CrewTrainStartedEvent(CrewBaseEvent): @@ -61,7 +61,7 @@ class CrewTrainStartedEvent(CrewBaseEvent): n_iterations: int filename: str inputs: dict[str, Any] | None - type: str = "crew_train_started" + type: Literal["crew_train_started"] = "crew_train_started" class CrewTrainCompletedEvent(CrewBaseEvent): @@ -69,14 +69,14 @@ class CrewTrainCompletedEvent(CrewBaseEvent): n_iterations: int filename: str - type: str = "crew_train_completed" + type: Literal["crew_train_completed"] = "crew_train_completed" class CrewTrainFailedEvent(CrewBaseEvent): """Event emitted when a crew fails to complete training""" error: str - type: str = "crew_train_failed" + type: Literal["crew_train_failed"] = "crew_train_failed" class CrewTestStartedEvent(CrewBaseEvent): @@ -85,20 +85,20 @@ class CrewTestStartedEvent(CrewBaseEvent): n_iterations: int eval_llm: str | Any | None inputs: dict[str, Any] | None - type: str = "crew_test_started" + type: Literal["crew_test_started"] = "crew_test_started" class CrewTestCompletedEvent(CrewBaseEvent): """Event emitted when a crew completes testing""" - type: str = "crew_test_completed" + type: Literal["crew_test_completed"] = "crew_test_completed" class CrewTestFailedEvent(CrewBaseEvent): """Event emitted when a crew fails to complete testing""" error: str - type: 
str = "crew_test_failed" + type: Literal["crew_test_failed"] = "crew_test_failed" class CrewTestResultEvent(CrewBaseEvent): @@ -107,4 +107,4 @@ class CrewTestResultEvent(CrewBaseEvent): quality: float execution_duration: float model: str - type: str = "crew_test_result" + type: Literal["crew_test_result"] = "crew_test_result" diff --git a/lib/crewai/src/crewai/events/types/event_bus_types.py b/lib/crewai/src/crewai/events/types/event_bus_types.py index 8a650a731..677f6ce93 100644 --- a/lib/crewai/src/crewai/events/types/event_bus_types.py +++ b/lib/crewai/src/crewai/events/types/event_bus_types.py @@ -6,10 +6,17 @@ from typing import Any, TypeAlias from crewai.events.base_events import BaseEvent -SyncHandler: TypeAlias = Callable[[Any, BaseEvent], None] -AsyncHandler: TypeAlias = Callable[[Any, BaseEvent], Coroutine[Any, Any, None]] +SyncHandler: TypeAlias = ( + Callable[[Any, BaseEvent], None] | Callable[[Any, BaseEvent, Any], None] +) +AsyncHandler: TypeAlias = ( + Callable[[Any, BaseEvent], Coroutine[Any, Any, None]] + | Callable[[Any, BaseEvent, Any], Coroutine[Any, Any, None]] +) SyncHandlerSet: TypeAlias = frozenset[SyncHandler] AsyncHandlerSet: TypeAlias = frozenset[AsyncHandler] -Handler: TypeAlias = Callable[[Any, BaseEvent], Any] +Handler: TypeAlias = ( + Callable[[Any, BaseEvent], Any] | Callable[[Any, BaseEvent, Any], Any] +) ExecutionPlan: TypeAlias = list[set[Handler]] diff --git a/lib/crewai/src/crewai/events/types/flow_events.py b/lib/crewai/src/crewai/events/types/flow_events.py index d820b8a05..c2c1e2912 100644 --- a/lib/crewai/src/crewai/events/types/flow_events.py +++ b/lib/crewai/src/crewai/events/types/flow_events.py @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, Literal from pydantic import BaseModel, ConfigDict @@ -17,14 +17,14 @@ class FlowStartedEvent(FlowEvent): flow_name: str inputs: dict[str, Any] | None = None - type: str = "flow_started" + type: Literal["flow_started"] = "flow_started" class 
FlowCreatedEvent(FlowEvent): """Event emitted when a flow is created""" flow_name: str - type: str = "flow_created" + type: Literal["flow_created"] = "flow_created" class MethodExecutionStartedEvent(FlowEvent): @@ -34,7 +34,7 @@ class MethodExecutionStartedEvent(FlowEvent): method_name: str state: dict[str, Any] | BaseModel params: dict[str, Any] | None = None - type: str = "method_execution_started" + type: Literal["method_execution_started"] = "method_execution_started" class MethodExecutionFinishedEvent(FlowEvent): @@ -44,7 +44,7 @@ class MethodExecutionFinishedEvent(FlowEvent): method_name: str result: Any = None state: dict[str, Any] | BaseModel - type: str = "method_execution_finished" + type: Literal["method_execution_finished"] = "method_execution_finished" class MethodExecutionFailedEvent(FlowEvent): @@ -53,7 +53,7 @@ class MethodExecutionFailedEvent(FlowEvent): flow_name: str method_name: str error: Exception - type: str = "method_execution_failed" + type: Literal["method_execution_failed"] = "method_execution_failed" model_config = ConfigDict(arbitrary_types_allowed=True) @@ -78,7 +78,7 @@ class MethodExecutionPausedEvent(FlowEvent): flow_id: str message: str emit: list[str] | None = None - type: str = "method_execution_paused" + type: Literal["method_execution_paused"] = "method_execution_paused" class FlowFinishedEvent(FlowEvent): @@ -86,7 +86,7 @@ class FlowFinishedEvent(FlowEvent): flow_name: str result: Any | None = None - type: str = "flow_finished" + type: Literal["flow_finished"] = "flow_finished" state: dict[str, Any] | BaseModel @@ -110,14 +110,14 @@ class FlowPausedEvent(FlowEvent): state: dict[str, Any] | BaseModel message: str emit: list[str] | None = None - type: str = "flow_paused" + type: Literal["flow_paused"] = "flow_paused" class FlowPlotEvent(FlowEvent): """Event emitted when a flow plot is created""" flow_name: str - type: str = "flow_plot" + type: Literal["flow_plot"] = "flow_plot" class FlowInputRequestedEvent(FlowEvent): @@ -138,7 
+138,7 @@ class FlowInputRequestedEvent(FlowEvent): method_name: str message: str metadata: dict[str, Any] | None = None - type: str = "flow_input_requested" + type: Literal["flow_input_requested"] = "flow_input_requested" class FlowInputReceivedEvent(FlowEvent): @@ -163,7 +163,7 @@ class FlowInputReceivedEvent(FlowEvent): response: str | None = None metadata: dict[str, Any] | None = None response_metadata: dict[str, Any] | None = None - type: str = "flow_input_received" + type: Literal["flow_input_received"] = "flow_input_received" class HumanFeedbackRequestedEvent(FlowEvent): @@ -187,7 +187,7 @@ class HumanFeedbackRequestedEvent(FlowEvent): message: str emit: list[str] | None = None request_id: str | None = None - type: str = "human_feedback_requested" + type: Literal["human_feedback_requested"] = "human_feedback_requested" class HumanFeedbackReceivedEvent(FlowEvent): @@ -209,4 +209,4 @@ class HumanFeedbackReceivedEvent(FlowEvent): feedback: str outcome: str | None = None request_id: str | None = None - type: str = "human_feedback_received" + type: Literal["human_feedback_received"] = "human_feedback_received" diff --git a/lib/crewai/src/crewai/events/types/knowledge_events.py b/lib/crewai/src/crewai/events/types/knowledge_events.py index a2d9af728..086e89377 100644 --- a/lib/crewai/src/crewai/events/types/knowledge_events.py +++ b/lib/crewai/src/crewai/events/types/knowledge_events.py @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, Literal from crewai.events.base_events import BaseEvent @@ -20,14 +20,16 @@ class KnowledgeEventBase(BaseEvent): class KnowledgeRetrievalStartedEvent(KnowledgeEventBase): """Event emitted when a knowledge retrieval is started.""" - type: str = "knowledge_search_query_started" + type: Literal["knowledge_search_query_started"] = "knowledge_search_query_started" class KnowledgeRetrievalCompletedEvent(KnowledgeEventBase): """Event emitted when a knowledge retrieval is completed.""" query: str - type: str = 
"knowledge_search_query_completed" + type: Literal["knowledge_search_query_completed"] = ( + "knowledge_search_query_completed" + ) retrieved_knowledge: str @@ -35,13 +37,13 @@ class KnowledgeQueryStartedEvent(KnowledgeEventBase): """Event emitted when a knowledge query is started.""" task_prompt: str - type: str = "knowledge_query_started" + type: Literal["knowledge_query_started"] = "knowledge_query_started" class KnowledgeQueryFailedEvent(KnowledgeEventBase): """Event emitted when a knowledge query fails.""" - type: str = "knowledge_query_failed" + type: Literal["knowledge_query_failed"] = "knowledge_query_failed" error: str @@ -49,12 +51,12 @@ class KnowledgeQueryCompletedEvent(KnowledgeEventBase): """Event emitted when a knowledge query is completed.""" query: str - type: str = "knowledge_query_completed" + type: Literal["knowledge_query_completed"] = "knowledge_query_completed" class KnowledgeSearchQueryFailedEvent(KnowledgeEventBase): """Event emitted when a knowledge search query fails.""" query: str - type: str = "knowledge_search_query_failed" + type: Literal["knowledge_search_query_failed"] = "knowledge_search_query_failed" error: str diff --git a/lib/crewai/src/crewai/events/types/llm_events.py b/lib/crewai/src/crewai/events/types/llm_events.py index 4b8c96d9e..b138f908c 100644 --- a/lib/crewai/src/crewai/events/types/llm_events.py +++ b/lib/crewai/src/crewai/events/types/llm_events.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Any +from typing import Any, Literal from pydantic import BaseModel @@ -43,7 +43,7 @@ class LLMCallStartedEvent(LLMEventBase): multimodal content (text, images, etc.) 
""" - type: str = "llm_call_started" + type: Literal["llm_call_started"] = "llm_call_started" messages: str | list[dict[str, Any]] | None = None tools: list[dict[str, Any]] | None = None callbacks: list[Any] | None = None @@ -53,7 +53,7 @@ class LLMCallStartedEvent(LLMEventBase): class LLMCallCompletedEvent(LLMEventBase): """Event emitted when a LLM call completes""" - type: str = "llm_call_completed" + type: Literal["llm_call_completed"] = "llm_call_completed" messages: str | list[dict[str, Any]] | None = None response: Any call_type: LLMCallType @@ -64,7 +64,7 @@ class LLMCallFailedEvent(LLMEventBase): """Event emitted when a LLM call fails""" error: str - type: str = "llm_call_failed" + type: Literal["llm_call_failed"] = "llm_call_failed" class FunctionCall(BaseModel): @@ -82,7 +82,7 @@ class ToolCall(BaseModel): class LLMStreamChunkEvent(LLMEventBase): """Event emitted when a streaming chunk is received""" - type: str = "llm_stream_chunk" + type: Literal["llm_stream_chunk"] = "llm_stream_chunk" chunk: str tool_call: ToolCall | None = None call_type: LLMCallType | None = None @@ -92,6 +92,6 @@ class LLMStreamChunkEvent(LLMEventBase): class LLMThinkingChunkEvent(LLMEventBase): """Event emitted when a thinking/reasoning chunk is received from a thinking model""" - type: str = "llm_thinking_chunk" + type: Literal["llm_thinking_chunk"] = "llm_thinking_chunk" chunk: str response_id: str | None = None diff --git a/lib/crewai/src/crewai/events/types/llm_guardrail_events.py b/lib/crewai/src/crewai/events/types/llm_guardrail_events.py index fdf82cd2a..8bbcf6e0b 100644 --- a/lib/crewai/src/crewai/events/types/llm_guardrail_events.py +++ b/lib/crewai/src/crewai/events/types/llm_guardrail_events.py @@ -1,6 +1,6 @@ from collections.abc import Callable from inspect import getsource -from typing import Any +from typing import Any, Literal from crewai.events.base_events import BaseEvent @@ -27,7 +27,7 @@ class LLMGuardrailStartedEvent(LLMGuardrailBaseEvent): retry_count: The 
number of times the guardrail has been retried """ - type: str = "llm_guardrail_started" + type: Literal["llm_guardrail_started"] = "llm_guardrail_started" guardrail: str | Callable[..., Any] retry_count: int @@ -53,7 +53,7 @@ class LLMGuardrailCompletedEvent(LLMGuardrailBaseEvent): retry_count: The number of times the guardrail has been retried """ - type: str = "llm_guardrail_completed" + type: Literal["llm_guardrail_completed"] = "llm_guardrail_completed" success: bool result: Any error: str | None = None @@ -68,6 +68,6 @@ class LLMGuardrailFailedEvent(LLMGuardrailBaseEvent): retry_count: The number of times the guardrail has been retried """ - type: str = "llm_guardrail_failed" + type: Literal["llm_guardrail_failed"] = "llm_guardrail_failed" error: str retry_count: int diff --git a/lib/crewai/src/crewai/events/types/logging_events.py b/lib/crewai/src/crewai/events/types/logging_events.py index 31b8bdd1e..6bd0ff3e3 100644 --- a/lib/crewai/src/crewai/events/types/logging_events.py +++ b/lib/crewai/src/crewai/events/types/logging_events.py @@ -1,6 +1,6 @@ """Agent logging events that don't reference BaseAgent to avoid circular imports.""" -from typing import Any +from typing import Any, Literal from pydantic import ConfigDict @@ -13,7 +13,7 @@ class AgentLogsStartedEvent(BaseEvent): agent_role: str task_description: str | None = None verbose: bool = False - type: str = "agent_logs_started" + type: Literal["agent_logs_started"] = "agent_logs_started" class AgentLogsExecutionEvent(BaseEvent): @@ -22,6 +22,6 @@ class AgentLogsExecutionEvent(BaseEvent): agent_role: str formatted_answer: Any verbose: bool = False - type: str = "agent_logs_execution" + type: Literal["agent_logs_execution"] = "agent_logs_execution" model_config = ConfigDict(arbitrary_types_allowed=True) diff --git a/lib/crewai/src/crewai/events/types/mcp_events.py b/lib/crewai/src/crewai/events/types/mcp_events.py index a89d4df70..c9278dec0 100644 --- a/lib/crewai/src/crewai/events/types/mcp_events.py 
+++ b/lib/crewai/src/crewai/events/types/mcp_events.py @@ -1,5 +1,5 @@ from datetime import datetime -from typing import Any +from typing import Any, Literal from crewai.events.base_events import BaseEvent @@ -24,7 +24,7 @@ class MCPEvent(BaseEvent): class MCPConnectionStartedEvent(MCPEvent): """Event emitted when starting to connect to an MCP server.""" - type: str = "mcp_connection_started" + type: Literal["mcp_connection_started"] = "mcp_connection_started" connect_timeout: int | None = None is_reconnect: bool = ( False # True if this is a reconnection, False for first connection @@ -34,7 +34,7 @@ class MCPConnectionStartedEvent(MCPEvent): class MCPConnectionCompletedEvent(MCPEvent): """Event emitted when successfully connected to an MCP server.""" - type: str = "mcp_connection_completed" + type: Literal["mcp_connection_completed"] = "mcp_connection_completed" started_at: datetime | None = None completed_at: datetime | None = None connection_duration_ms: float | None = None @@ -46,7 +46,7 @@ class MCPConnectionCompletedEvent(MCPEvent): class MCPConnectionFailedEvent(MCPEvent): """Event emitted when connection to an MCP server fails.""" - type: str = "mcp_connection_failed" + type: Literal["mcp_connection_failed"] = "mcp_connection_failed" error: str error_type: str | None = None # "timeout", "authentication", "network", etc. 
started_at: datetime | None = None @@ -56,7 +56,7 @@ class MCPConnectionFailedEvent(MCPEvent): class MCPToolExecutionStartedEvent(MCPEvent): """Event emitted when starting to execute an MCP tool.""" - type: str = "mcp_tool_execution_started" + type: Literal["mcp_tool_execution_started"] = "mcp_tool_execution_started" tool_name: str tool_args: dict[str, Any] | None = None @@ -64,7 +64,7 @@ class MCPToolExecutionStartedEvent(MCPEvent): class MCPToolExecutionCompletedEvent(MCPEvent): """Event emitted when MCP tool execution completes.""" - type: str = "mcp_tool_execution_completed" + type: Literal["mcp_tool_execution_completed"] = "mcp_tool_execution_completed" tool_name: str tool_args: dict[str, Any] | None = None result: Any | None = None @@ -76,7 +76,7 @@ class MCPToolExecutionCompletedEvent(MCPEvent): class MCPToolExecutionFailedEvent(MCPEvent): """Event emitted when MCP tool execution fails.""" - type: str = "mcp_tool_execution_failed" + type: Literal["mcp_tool_execution_failed"] = "mcp_tool_execution_failed" tool_name: str tool_args: dict[str, Any] | None = None error: str @@ -92,7 +92,7 @@ class MCPConfigFetchFailedEvent(BaseEvent): failed, or native MCP resolution failed after config was fetched. 
""" - type: str = "mcp_config_fetch_failed" + type: Literal["mcp_config_fetch_failed"] = "mcp_config_fetch_failed" slug: str error: str error_type: str | None = None # "not_connected", "api_error", "connection_failed" diff --git a/lib/crewai/src/crewai/events/types/memory_events.py b/lib/crewai/src/crewai/events/types/memory_events.py index 0fd57a352..1d6b05017 100644 --- a/lib/crewai/src/crewai/events/types/memory_events.py +++ b/lib/crewai/src/crewai/events/types/memory_events.py @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, Literal from crewai.events.base_events import BaseEvent @@ -23,7 +23,7 @@ class MemoryBaseEvent(BaseEvent): class MemoryQueryStartedEvent(MemoryBaseEvent): """Event emitted when a memory query is started""" - type: str = "memory_query_started" + type: Literal["memory_query_started"] = "memory_query_started" query: str limit: int score_threshold: float | None = None @@ -32,7 +32,7 @@ class MemoryQueryStartedEvent(MemoryBaseEvent): class MemoryQueryCompletedEvent(MemoryBaseEvent): """Event emitted when a memory query is completed successfully""" - type: str = "memory_query_completed" + type: Literal["memory_query_completed"] = "memory_query_completed" query: str results: Any limit: int @@ -43,7 +43,7 @@ class MemoryQueryCompletedEvent(MemoryBaseEvent): class MemoryQueryFailedEvent(MemoryBaseEvent): """Event emitted when a memory query fails""" - type: str = "memory_query_failed" + type: Literal["memory_query_failed"] = "memory_query_failed" query: str limit: int score_threshold: float | None = None @@ -53,7 +53,7 @@ class MemoryQueryFailedEvent(MemoryBaseEvent): class MemorySaveStartedEvent(MemoryBaseEvent): """Event emitted when a memory save operation is started""" - type: str = "memory_save_started" + type: Literal["memory_save_started"] = "memory_save_started" value: str | None = None metadata: dict[str, Any] | None = None agent_role: str | None = None @@ -62,7 +62,7 @@ class MemorySaveStartedEvent(MemoryBaseEvent): class 
MemorySaveCompletedEvent(MemoryBaseEvent): """Event emitted when a memory save operation is completed successfully""" - type: str = "memory_save_completed" + type: Literal["memory_save_completed"] = "memory_save_completed" value: str metadata: dict[str, Any] | None = None agent_role: str | None = None @@ -72,7 +72,7 @@ class MemorySaveCompletedEvent(MemoryBaseEvent): class MemorySaveFailedEvent(MemoryBaseEvent): """Event emitted when a memory save operation fails""" - type: str = "memory_save_failed" + type: Literal["memory_save_failed"] = "memory_save_failed" value: str | None = None metadata: dict[str, Any] | None = None agent_role: str | None = None @@ -82,14 +82,14 @@ class MemorySaveFailedEvent(MemoryBaseEvent): class MemoryRetrievalStartedEvent(MemoryBaseEvent): """Event emitted when memory retrieval for a task prompt starts""" - type: str = "memory_retrieval_started" + type: Literal["memory_retrieval_started"] = "memory_retrieval_started" task_id: str | None = None class MemoryRetrievalCompletedEvent(MemoryBaseEvent): """Event emitted when memory retrieval for a task prompt completes successfully""" - type: str = "memory_retrieval_completed" + type: Literal["memory_retrieval_completed"] = "memory_retrieval_completed" task_id: str | None = None memory_content: str retrieval_time_ms: float @@ -98,6 +98,6 @@ class MemoryRetrievalCompletedEvent(MemoryBaseEvent): class MemoryRetrievalFailedEvent(MemoryBaseEvent): """Event emitted when memory retrieval for a task prompt fails.""" - type: str = "memory_retrieval_failed" + type: Literal["memory_retrieval_failed"] = "memory_retrieval_failed" task_id: str | None = None error: str diff --git a/lib/crewai/src/crewai/events/types/observation_events.py b/lib/crewai/src/crewai/events/types/observation_events.py index 2c95f3ae0..beac6d235 100644 --- a/lib/crewai/src/crewai/events/types/observation_events.py +++ b/lib/crewai/src/crewai/events/types/observation_events.py @@ -5,7 +5,7 @@ PlannerObserver analyzes step execution 
results and decides on plan continuation, refinement, or replanning. """ -from typing import Any +from typing import Any, Literal from crewai.events.base_events import BaseEvent @@ -32,7 +32,7 @@ class StepObservationStartedEvent(ObservationEvent): Fires after every step execution, before the observation LLM call. """ - type: str = "step_observation_started" + type: Literal["step_observation_started"] = "step_observation_started" class StepObservationCompletedEvent(ObservationEvent): @@ -42,7 +42,7 @@ class StepObservationCompletedEvent(ObservationEvent): the plan is still valid, and what action to take next. """ - type: str = "step_observation_completed" + type: Literal["step_observation_completed"] = "step_observation_completed" step_completed_successfully: bool = True key_information_learned: str = "" remaining_plan_still_valid: bool = True @@ -59,7 +59,7 @@ class StepObservationFailedEvent(ObservationEvent): but the event allows monitoring/alerting on observation failures. """ - type: str = "step_observation_failed" + type: Literal["step_observation_failed"] = "step_observation_failed" error: str = "" @@ -70,7 +70,7 @@ class PlanRefinementEvent(ObservationEvent): sharpening pending todo descriptions based on new information. """ - type: str = "plan_refinement" + type: Literal["plan_refinement"] = "plan_refinement" refined_step_count: int = 0 refinements: list[str] | None = None @@ -82,7 +82,7 @@ class PlanReplanTriggeredEvent(ObservationEvent): regenerated from scratch, preserving completed step results. """ - type: str = "plan_replan_triggered" + type: Literal["plan_replan_triggered"] = "plan_replan_triggered" replan_reason: str = "" replan_count: int = 0 completed_steps_preserved: int = 0 @@ -94,6 +94,6 @@ class GoalAchievedEarlyEvent(ObservationEvent): Remaining steps will be skipped and execution will finalize. 
""" - type: str = "goal_achieved_early" + type: Literal["goal_achieved_early"] = "goal_achieved_early" steps_remaining: int = 0 steps_completed: int = 0 diff --git a/lib/crewai/src/crewai/events/types/reasoning_events.py b/lib/crewai/src/crewai/events/types/reasoning_events.py index f9c9c1dc3..cb565a66e 100644 --- a/lib/crewai/src/crewai/events/types/reasoning_events.py +++ b/lib/crewai/src/crewai/events/types/reasoning_events.py @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, Literal from crewai.events.base_events import BaseEvent @@ -24,7 +24,7 @@ class ReasoningEvent(BaseEvent): class AgentReasoningStartedEvent(ReasoningEvent): """Event emitted when an agent starts reasoning about a task.""" - type: str = "agent_reasoning_started" + type: Literal["agent_reasoning_started"] = "agent_reasoning_started" agent_role: str task_id: str @@ -32,7 +32,7 @@ class AgentReasoningStartedEvent(ReasoningEvent): class AgentReasoningCompletedEvent(ReasoningEvent): """Event emitted when an agent finishes its reasoning process.""" - type: str = "agent_reasoning_completed" + type: Literal["agent_reasoning_completed"] = "agent_reasoning_completed" agent_role: str task_id: str plan: str @@ -42,7 +42,7 @@ class AgentReasoningCompletedEvent(ReasoningEvent): class AgentReasoningFailedEvent(ReasoningEvent): """Event emitted when the reasoning process fails.""" - type: str = "agent_reasoning_failed" + type: Literal["agent_reasoning_failed"] = "agent_reasoning_failed" agent_role: str task_id: str error: str diff --git a/lib/crewai/src/crewai/events/types/skill_events.py b/lib/crewai/src/crewai/events/types/skill_events.py index f99d6bd70..aab625dda 100644 --- a/lib/crewai/src/crewai/events/types/skill_events.py +++ b/lib/crewai/src/crewai/events/types/skill_events.py @@ -6,7 +6,7 @@ Events emitted during skill discovery, loading, and activation. 
from __future__ import annotations from pathlib import Path -from typing import Any +from typing import Any, Literal from crewai.events.base_events import BaseEvent @@ -28,14 +28,14 @@ class SkillEvent(BaseEvent): class SkillDiscoveryStartedEvent(SkillEvent): """Event emitted when skill discovery begins.""" - type: str = "skill_discovery_started" + type: Literal["skill_discovery_started"] = "skill_discovery_started" search_path: Path class SkillDiscoveryCompletedEvent(SkillEvent): """Event emitted when skill discovery completes.""" - type: str = "skill_discovery_completed" + type: Literal["skill_discovery_completed"] = "skill_discovery_completed" search_path: Path skills_found: int skill_names: list[str] @@ -44,19 +44,19 @@ class SkillDiscoveryCompletedEvent(SkillEvent): class SkillLoadedEvent(SkillEvent): """Event emitted when a skill is loaded at metadata level.""" - type: str = "skill_loaded" + type: Literal["skill_loaded"] = "skill_loaded" disclosure_level: int = 1 class SkillActivatedEvent(SkillEvent): """Event emitted when a skill is activated (promoted to instructions level).""" - type: str = "skill_activated" + type: Literal["skill_activated"] = "skill_activated" disclosure_level: int = 2 class SkillLoadFailedEvent(SkillEvent): """Event emitted when skill loading fails.""" - type: str = "skill_load_failed" + type: Literal["skill_load_failed"] = "skill_load_failed" error: str diff --git a/lib/crewai/src/crewai/events/types/task_events.py b/lib/crewai/src/crewai/events/types/task_events.py index 5d2fd746a..69609e3fd 100644 --- a/lib/crewai/src/crewai/events/types/task_events.py +++ b/lib/crewai/src/crewai/events/types/task_events.py @@ -1,12 +1,20 @@ -from typing import Any +from typing import Any, Literal from crewai.events.base_events import BaseEvent from crewai.tasks.task_output import TaskOutput def _set_task_fingerprint(event: BaseEvent, task: Any) -> None: - """Set fingerprint data on an event from a task object.""" - if task is not None and 
task.fingerprint: + """Set task identity and fingerprint data on an event.""" + if task is None: + return + task_id = getattr(task, "id", None) + if task_id is not None: + event.task_id = str(task_id) + task_name = getattr(task, "name", None) or getattr(task, "description", None) + if task_name: + event.task_name = task_name + if task.fingerprint: event.source_fingerprint = task.fingerprint.uuid_str event.source_type = "task" if task.fingerprint.metadata: @@ -16,7 +24,7 @@ def _set_task_fingerprint(event: BaseEvent, task: Any) -> None: class TaskStartedEvent(BaseEvent): """Event emitted when a task starts""" - type: str = "task_started" + type: Literal["task_started"] = "task_started" context: str | None task: Any | None = None @@ -29,7 +37,7 @@ class TaskCompletedEvent(BaseEvent): """Event emitted when a task completes""" output: TaskOutput - type: str = "task_completed" + type: Literal["task_completed"] = "task_completed" task: Any | None = None def __init__(self, **data: Any) -> None: @@ -41,7 +49,7 @@ class TaskFailedEvent(BaseEvent): """Event emitted when a task fails""" error: str - type: str = "task_failed" + type: Literal["task_failed"] = "task_failed" task: Any | None = None def __init__(self, **data: Any) -> None: @@ -52,7 +60,7 @@ class TaskFailedEvent(BaseEvent): class TaskEvaluationEvent(BaseEvent): """Event emitted when a task evaluation is completed""" - type: str = "task_evaluation" + type: Literal["task_evaluation"] = "task_evaluation" evaluation_type: str task: Any | None = None diff --git a/lib/crewai/src/crewai/events/types/tool_usage_events.py b/lib/crewai/src/crewai/events/types/tool_usage_events.py index c4e681546..44edbe0ac 100644 --- a/lib/crewai/src/crewai/events/types/tool_usage_events.py +++ b/lib/crewai/src/crewai/events/types/tool_usage_events.py @@ -1,6 +1,6 @@ from collections.abc import Callable from datetime import datetime -from typing import Any +from typing import Any, Literal from pydantic import ConfigDict @@ -55,7 +55,7 @@ 
class ToolUsageEvent(BaseEvent): class ToolUsageStartedEvent(ToolUsageEvent): """Event emitted when a tool execution is started""" - type: str = "tool_usage_started" + type: Literal["tool_usage_started"] = "tool_usage_started" class ToolUsageFinishedEvent(ToolUsageEvent): @@ -65,35 +65,35 @@ class ToolUsageFinishedEvent(ToolUsageEvent): finished_at: datetime from_cache: bool = False output: Any - type: str = "tool_usage_finished" + type: Literal["tool_usage_finished"] = "tool_usage_finished" class ToolUsageErrorEvent(ToolUsageEvent): """Event emitted when a tool execution encounters an error""" error: Any - type: str = "tool_usage_error" + type: Literal["tool_usage_error"] = "tool_usage_error" class ToolValidateInputErrorEvent(ToolUsageEvent): """Event emitted when a tool input validation encounters an error""" error: Any - type: str = "tool_validate_input_error" + type: Literal["tool_validate_input_error"] = "tool_validate_input_error" class ToolSelectionErrorEvent(ToolUsageEvent): """Event emitted when a tool selection encounters an error""" error: Any - type: str = "tool_selection_error" + type: Literal["tool_selection_error"] = "tool_selection_error" class ToolExecutionErrorEvent(BaseEvent): """Event emitted when a tool execution encounters an error""" error: Any - type: str = "tool_execution_error" + type: Literal["tool_execution_error"] = "tool_execution_error" tool_name: str tool_args: dict[str, Any] tool_class: Callable[..., Any] diff --git a/lib/crewai/src/crewai/events/utils/handlers.py b/lib/crewai/src/crewai/events/utils/handlers.py index bc3e76eee..48d21bd75 100644 --- a/lib/crewai/src/crewai/events/utils/handlers.py +++ b/lib/crewai/src/crewai/events/utils/handlers.py @@ -10,6 +10,23 @@ from crewai.events.base_events import BaseEvent from crewai.events.types.event_bus_types import AsyncHandler, SyncHandler +@functools.lru_cache(maxsize=256) +def _get_param_count_cached(handler: Any) -> int: + return len(inspect.signature(handler).parameters) + + +def 
_get_param_count(handler: Any) -> int: + """Return the number of parameters a handler accepts, with caching. + + Falls back to uncached introspection for unhashable handlers + like functools.partial. + """ + try: + return _get_param_count_cached(handler) + except TypeError: + return len(inspect.signature(handler).parameters) + + def is_async_handler( handler: Any, ) -> TypeIs[AsyncHandler]: @@ -41,6 +58,7 @@ def is_call_handler_safe( handler: SyncHandler, source: Any, event: BaseEvent, + state: Any = None, ) -> Exception | None: """Safely call a single handler and return any exception. @@ -48,12 +66,16 @@ def is_call_handler_safe( handler: The handler function to call source: The object that emitted the event event: The event instance + state: Optional RuntimeState passed as third arg if handler accepts it Returns: Exception if handler raised one, None otherwise """ try: - handler(source, event) + if _get_param_count(handler) >= 3: + handler(source, event, state) # type: ignore[call-arg] + else: + handler(source, event) # type: ignore[call-arg] return None except Exception as e: return e diff --git a/lib/crewai/src/crewai/experimental/agent_executor.py b/lib/crewai/src/crewai/experimental/agent_executor.py index 2b487071b..067489c8e 100644 --- a/lib/crewai/src/crewai/experimental/agent_executor.py +++ b/lib/crewai/src/crewai/experimental/agent_executor.py @@ -1,3 +1,4 @@ +# mypy: disable-error-code="union-attr,arg-type" from __future__ import annotations import asyncio @@ -21,7 +22,7 @@ from rich.console import Console from rich.text import Text from typing_extensions import Self -from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin +from crewai.agents.agent_builder.base_agent_executor import BaseAgentExecutor from crewai.agents.parser import ( AgentAction, AgentFinish, @@ -106,11 +107,8 @@ from crewai.utilities.types import LLMMessage if TYPE_CHECKING: - from crewai.agent import Agent from crewai.agents.tools_handler import 
ToolsHandler - from crewai.crew import Crew from crewai.llms.base_llm import BaseLLM - from crewai.task import Task from crewai.tools.tool_types import ToolResult from crewai.utilities.prompts import StandardPromptResult, SystemPromptResult @@ -155,7 +153,7 @@ class AgentExecutorState(BaseModel): ) -class AgentExecutor(Flow[AgentExecutorState], CrewAgentExecutorMixin): +class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignore[pydantic-unexpected] """Agent Executor for both standalone agents and crew-bound agents. _skip_auto_memory prevents Flow from eagerly allocating a Memory @@ -163,7 +161,7 @@ class AgentExecutor(Flow[AgentExecutorState], CrewAgentExecutorMixin): Inherits from: - Flow[AgentExecutorState]: Provides flow orchestration capabilities - - CrewAgentExecutorMixin: Provides memory methods (short/long/external term) + - BaseAgentExecutor: Provides memory methods (short/long/external term) This executor can operate in two modes: - Standalone mode: When crew and task are None (used by Agent.kickoff()) @@ -172,9 +170,9 @@ class AgentExecutor(Flow[AgentExecutorState], CrewAgentExecutorMixin): _skip_auto_memory: bool = True + executor_type: Literal["experimental"] = "experimental" suppress_flow_events: bool = True # always suppress for executor llm: BaseLLM = Field(exclude=True) - agent: Agent = Field(exclude=True) prompt: SystemPromptResult | StandardPromptResult = Field(exclude=True) max_iter: int = Field(default=25, exclude=True) tools: list[CrewStructuredTool] = Field(default_factory=list, exclude=True) @@ -182,8 +180,6 @@ class AgentExecutor(Flow[AgentExecutorState], CrewAgentExecutorMixin): stop_words: list[str] = Field(default_factory=list, exclude=True) tools_description: str = Field(default="", exclude=True) tools_handler: ToolsHandler | None = Field(default=None, exclude=True) - task: Task | None = Field(default=None, exclude=True) - crew: Crew | None = Field(default=None, exclude=True) step_callback: Any = Field(default=None, 
exclude=True) original_tools: list[BaseTool] = Field(default_factory=list, exclude=True) function_calling_llm: BaseLLM | None = Field(default=None, exclude=True) @@ -268,17 +264,17 @@ class AgentExecutor(Flow[AgentExecutorState], CrewAgentExecutorMixin): """Get thread-safe state proxy.""" return StateProxy(self._state, self._state_lock) # type: ignore[return-value] - @property + @property # type: ignore[misc] def iterations(self) -> int: """Compatibility property for mixin - returns state iterations.""" - return self._state.iterations # type: ignore[no-any-return] + return int(self._state.iterations) @iterations.setter def iterations(self, value: int) -> None: """Set state iterations.""" self._state.iterations = value - @property + @property # type: ignore[misc] def messages(self) -> list[LLMMessage]: """Compatibility property - returns state messages.""" return self._state.messages # type: ignore[no-any-return] @@ -395,28 +391,28 @@ class AgentExecutor(Flow[AgentExecutorState], CrewAgentExecutorMixin): """ config = self.agent.planning_config if config is not None: - return config.reasoning_effort + return str(config.reasoning_effort) return "medium" def _get_max_replans(self) -> int: """Get max replans from planning config or default to 3.""" config = self.agent.planning_config if config is not None: - return config.max_replans + return int(config.max_replans) return 3 def _get_max_step_iterations(self) -> int: """Get max step iterations from planning config or default to 15.""" config = self.agent.planning_config if config is not None: - return config.max_step_iterations + return int(config.max_step_iterations) return 15 def _get_step_timeout(self) -> int | None: """Get per-step timeout from planning config or default to None.""" config = self.agent.planning_config if config is not None: - return config.step_timeout + return int(config.step_timeout) if config.step_timeout is not None else None return None def _build_context_for_todo(self, todo: TodoItem) -> 
StepExecutionContext: @@ -1790,7 +1786,7 @@ class AgentExecutor(Flow[AgentExecutorState], CrewAgentExecutorMixin): before_hook_context = ToolCallHookContext( tool_name=func_name, tool_input=args_dict, - tool=structured_tool, # type: ignore[arg-type] + tool=structured_tool, agent=self.agent, task=self.task, crew=self.crew, @@ -1864,7 +1860,7 @@ class AgentExecutor(Flow[AgentExecutorState], CrewAgentExecutorMixin): after_hook_context = ToolCallHookContext( tool_name=func_name, tool_input=args_dict, - tool=structured_tool, # type: ignore[arg-type] + tool=structured_tool, agent=self.agent, task=self.task, crew=self.crew, diff --git a/lib/crewai/src/crewai/flow/flow.py b/lib/crewai/src/crewai/flow/flow.py index a1be6317a..d99aa05de 100644 --- a/lib/crewai/src/crewai/flow/flow.py +++ b/lib/crewai/src/crewai/flow/flow.py @@ -121,6 +121,7 @@ if TYPE_CHECKING: from crewai.context import ExecutionContext from crewai.flow.async_feedback.types import PendingFeedbackContext from crewai.llms.base_llm import BaseLLM + from crewai.state.provider.core import BaseProvider from crewai.flow.visualization import build_flow_structure, render_interactive from crewai.types.streaming import CrewStreamingOutput, FlowStreamingOutput @@ -919,11 +920,60 @@ class Flow(BaseModel, Generic[T], metaclass=FlowMeta): max_method_calls: int = Field(default=100) execution_context: ExecutionContext | None = Field(default=None) + + @classmethod + def from_checkpoint( + cls, path: str, *, provider: BaseProvider | None = None + ) -> Flow: # type: ignore[type-arg] + """Restore a Flow from a checkpoint file.""" + from crewai.context import apply_execution_context + from crewai.events.event_bus import crewai_event_bus + from crewai.state.provider.json_provider import JsonProvider + from crewai.state.runtime import RuntimeState + + state = RuntimeState.from_checkpoint( + path, + provider=provider or JsonProvider(), + context={"from_checkpoint": True}, + ) + crewai_event_bus.set_runtime_state(state) + for entity 
in state.root: + if not isinstance(entity, Flow): + continue + if entity.execution_context is not None: + apply_execution_context(entity.execution_context) + if isinstance(entity, cls): + entity._restore_from_checkpoint() + return entity + instance = cls() + instance.checkpoint_completed_methods = entity.checkpoint_completed_methods + instance.checkpoint_method_outputs = entity.checkpoint_method_outputs + instance.checkpoint_method_counts = entity.checkpoint_method_counts + instance.checkpoint_state = entity.checkpoint_state + instance._restore_from_checkpoint() + return instance + raise ValueError(f"No Flow found in checkpoint: {path}") + checkpoint_completed_methods: set[str] | None = Field(default=None) checkpoint_method_outputs: list[Any] | None = Field(default=None) checkpoint_method_counts: dict[str, int] | None = Field(default=None) checkpoint_state: dict[str, Any] | None = Field(default=None) + def _restore_from_checkpoint(self) -> None: + """Restore private execution state from checkpoint fields.""" + if self.checkpoint_completed_methods is not None: + self._completed_methods = { + FlowMethodName(m) for m in self.checkpoint_completed_methods + } + if self.checkpoint_method_outputs is not None: + self._method_outputs = list(self.checkpoint_method_outputs) + if self.checkpoint_method_counts is not None: + self._method_execution_counts = { + FlowMethodName(k): v for k, v in self.checkpoint_method_counts.items() + } + if self.checkpoint_state is not None: + self._restore_state(self.checkpoint_state) + _methods: dict[FlowMethodName, FlowMethod[Any, Any]] = PrivateAttr( default_factory=dict ) diff --git a/lib/crewai/src/crewai/lite_agent.py b/lib/crewai/src/crewai/lite_agent.py index bbb464010..2bed7e92f 100644 --- a/lib/crewai/src/crewai/lite_agent.py +++ b/lib/crewai/src/crewai/lite_agent.py @@ -891,7 +891,7 @@ class LiteAgent(FlowTrackable, BaseModel): messages=self._messages, callbacks=self._callbacks, printer=self._printer, - from_agent=self, + 
from_agent=self, # type: ignore[arg-type] executor_context=self, response_model=response_model, verbose=self.verbose, diff --git a/lib/crewai/src/crewai/llm.py b/lib/crewai/src/crewai/llm.py index c294d6a84..192fffd1a 100644 --- a/lib/crewai/src/crewai/llm.py +++ b/lib/crewai/src/crewai/llm.py @@ -66,7 +66,7 @@ except ImportError: if TYPE_CHECKING: - from crewai.agent.core import Agent + from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.task import Task from crewai.tools.base_tool import BaseTool from crewai.utilities.types import LLMMessage @@ -343,6 +343,7 @@ class AccumulatedToolArgs(BaseModel): class LLM(BaseLLM): + llm_type: Literal["litellm"] = "litellm" completion_cost: float | None = None timeout: float | int | None = None top_p: float | None = None @@ -735,7 +736,7 @@ class LLM(BaseLLM): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> Any: """Handle a streaming response from the LLM. @@ -1048,7 +1049,7 @@ class LLM(BaseLLM): accumulated_tool_args: defaultdict[int, AccumulatedToolArgs], available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_id: str | None = None, ) -> Any: for tool_call in tool_calls: @@ -1137,7 +1138,7 @@ class LLM(BaseLLM): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Handle a non-streaming response from the LLM. 
@@ -1289,7 +1290,7 @@ class LLM(BaseLLM): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Handle an async non-streaming response from the LLM. @@ -1430,7 +1431,7 @@ class LLM(BaseLLM): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> Any: """Handle an async streaming response from the LLM. @@ -1606,7 +1607,7 @@ class LLM(BaseLLM): tool_calls: list[Any], available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, ) -> Any: """Handle a tool call from the LLM. @@ -1702,7 +1703,7 @@ class LLM(BaseLLM): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """High-level LLM call method. @@ -1852,7 +1853,7 @@ class LLM(BaseLLM): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Async high-level LLM call method. 
@@ -2001,7 +2002,7 @@ class LLM(BaseLLM): response: Any, call_type: LLMCallType, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, messages: str | list[LLMMessage] | None = None, usage: dict[str, Any] | None = None, ) -> None: diff --git a/lib/crewai/src/crewai/llms/base_llm.py b/lib/crewai/src/crewai/llms/base_llm.py index a0bf7c56a..fd3c8c45e 100644 --- a/lib/crewai/src/crewai/llms/base_llm.py +++ b/lib/crewai/src/crewai/llms/base_llm.py @@ -53,7 +53,7 @@ except ImportError: if TYPE_CHECKING: - from crewai.agent.core import Agent + from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.task import Task from crewai.tools.base_tool import BaseTool from crewai.utilities.types import LLMMessage @@ -117,6 +117,7 @@ class BaseLLM(BaseModel, ABC): model_config = ConfigDict(arbitrary_types_allowed=True, populate_by_name=True) + llm_type: str = "base" model: str temperature: float | None = None api_key: str | None = None @@ -240,7 +241,7 @@ class BaseLLM(BaseModel, ABC): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Call the LLM with the given messages. @@ -277,7 +278,7 @@ class BaseLLM(BaseModel, ABC): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Call the LLM with the given messages. 
@@ -434,7 +435,7 @@ class BaseLLM(BaseModel, ABC): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, ) -> None: """Emit LLM call started event.""" from crewai.utilities.serialization import to_serializable @@ -458,7 +459,7 @@ class BaseLLM(BaseModel, ABC): response: Any, call_type: LLMCallType, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, messages: str | list[LLMMessage] | None = None, usage: dict[str, Any] | None = None, ) -> None: @@ -483,7 +484,7 @@ class BaseLLM(BaseModel, ABC): self, error: str, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, ) -> None: """Emit LLM call failed event.""" crewai_event_bus.emit( @@ -501,7 +502,7 @@ class BaseLLM(BaseModel, ABC): self, chunk: str, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, tool_call: dict[str, Any] | None = None, call_type: LLMCallType | None = None, response_id: str | None = None, @@ -533,7 +534,7 @@ class BaseLLM(BaseModel, ABC): self, chunk: str, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_id: str | None = None, ) -> None: """Emit thinking/reasoning chunk event from a thinking model. @@ -561,7 +562,7 @@ class BaseLLM(BaseModel, ABC): function_args: dict[str, Any], available_functions: dict[str, Any], from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, ) -> str | None: """Handle tool execution with proper event emission. 
@@ -827,7 +828,7 @@ class BaseLLM(BaseModel, ABC): def _invoke_before_llm_call_hooks( self, messages: list[LLMMessage], - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, ) -> bool: """Invoke before_llm_call hooks for direct LLM calls (no agent context). @@ -896,7 +897,7 @@ class BaseLLM(BaseModel, ABC): self, messages: list[LLMMessage], response: str, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, ) -> str: """Invoke after_llm_call hooks for direct LLM calls (no agent context). diff --git a/lib/crewai/src/crewai/llms/providers/anthropic/completion.py b/lib/crewai/src/crewai/llms/providers/anthropic/completion.py index d710404bd..b6df34b94 100644 --- a/lib/crewai/src/crewai/llms/providers/anthropic/completion.py +++ b/lib/crewai/src/crewai/llms/providers/anthropic/completion.py @@ -148,6 +148,7 @@ class AnthropicCompletion(BaseLLM): offering native tool use, streaming support, and proper message formatting. """ + llm_type: Literal["anthropic"] = "anthropic" model: str = "claude-3-5-sonnet-20241022" timeout: float | None = None max_retries: int = 2 diff --git a/lib/crewai/src/crewai/llms/providers/azure/completion.py b/lib/crewai/src/crewai/llms/providers/azure/completion.py index 52bf05531..db7ab7e73 100644 --- a/lib/crewai/src/crewai/llms/providers/azure/completion.py +++ b/lib/crewai/src/crewai/llms/providers/azure/completion.py @@ -3,7 +3,7 @@ from __future__ import annotations import json import logging import os -from typing import Any, TypedDict +from typing import Any, Literal, TypedDict from urllib.parse import urlparse from pydantic import BaseModel, PrivateAttr, model_validator @@ -74,6 +74,7 @@ class AzureCompletion(BaseLLM): offering native function calling, streaming support, and proper Azure authentication. 
""" + llm_type: Literal["azure"] = "azure" endpoint: str | None = None api_version: str | None = None timeout: float | None = None diff --git a/lib/crewai/src/crewai/llms/providers/bedrock/completion.py b/lib/crewai/src/crewai/llms/providers/bedrock/completion.py index 6fcf3581d..c25c9bfec 100644 --- a/lib/crewai/src/crewai/llms/providers/bedrock/completion.py +++ b/lib/crewai/src/crewai/llms/providers/bedrock/completion.py @@ -5,7 +5,7 @@ from contextlib import AsyncExitStack import json import logging import os -from typing import TYPE_CHECKING, Any, TypedDict, cast +from typing import TYPE_CHECKING, Any, Literal, TypedDict, cast from pydantic import BaseModel, PrivateAttr, model_validator from typing_extensions import Required @@ -228,6 +228,7 @@ class BedrockCompletion(BaseLLM): - Model-specific conversation format handling (e.g., Cohere requirements) """ + llm_type: Literal["bedrock"] = "bedrock" model: str = "anthropic.claude-3-5-sonnet-20241022-v2:0" aws_access_key_id: str | None = None aws_secret_access_key: str | None = None diff --git a/lib/crewai/src/crewai/llms/providers/gemini/completion.py b/lib/crewai/src/crewai/llms/providers/gemini/completion.py index f790e22cf..c84f7f5fd 100644 --- a/lib/crewai/src/crewai/llms/providers/gemini/completion.py +++ b/lib/crewai/src/crewai/llms/providers/gemini/completion.py @@ -41,6 +41,7 @@ class GeminiCompletion(BaseLLM): offering native function calling, streaming support, and proper Gemini formatting. 
""" + llm_type: Literal["gemini"] = "gemini" model: str = "gemini-2.0-flash-001" project: str | None = None location: str | None = None diff --git a/lib/crewai/src/crewai/llms/providers/openai/completion.py b/lib/crewai/src/crewai/llms/providers/openai/completion.py index 1e91b2e5e..b76f552df 100644 --- a/lib/crewai/src/crewai/llms/providers/openai/completion.py +++ b/lib/crewai/src/crewai/llms/providers/openai/completion.py @@ -10,7 +10,11 @@ from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypedDict import httpx from openai import APIConnectionError, AsyncOpenAI, NotFoundError, OpenAI, Stream from openai.lib.streaming.chat import ChatCompletionStream -from openai.types.chat import ChatCompletion, ChatCompletionChunk +from openai.types.chat import ( + ChatCompletion, + ChatCompletionChunk, + ChatCompletionMessageFunctionToolCall, +) from openai.types.chat.chat_completion import Choice from openai.types.chat.chat_completion_chunk import ChoiceDelta from openai.types.responses import ( @@ -37,7 +41,7 @@ from crewai.utilities.types import LLMMessage if TYPE_CHECKING: - from crewai.agent.core import Agent + from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.task import Task from crewai.tools.base_tool import BaseTool @@ -184,6 +188,8 @@ class OpenAICompletion(BaseLLM): chain-of-thought without storing data on OpenAI servers. """ + llm_type: Literal["openai"] = "openai" + BUILTIN_TOOL_TYPES: ClassVar[dict[str, str]] = { "web_search": "web_search_preview", "file_search": "file_search", @@ -367,7 +373,7 @@ class OpenAICompletion(BaseLLM): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Call OpenAI API (Chat Completions or Responses based on api setting). 
@@ -435,7 +441,7 @@ class OpenAICompletion(BaseLLM): tools: list[dict[str, BaseTool]] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Call OpenAI Chat Completions API.""" @@ -467,7 +473,7 @@ class OpenAICompletion(BaseLLM): callbacks: list[Any] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Async call to OpenAI API (Chat Completions or Responses). @@ -530,7 +536,7 @@ class OpenAICompletion(BaseLLM): tools: list[dict[str, BaseTool]] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Async call to OpenAI Chat Completions API.""" @@ -561,7 +567,7 @@ class OpenAICompletion(BaseLLM): tools: list[dict[str, BaseTool]] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Call OpenAI Responses API.""" @@ -592,7 +598,7 @@ class OpenAICompletion(BaseLLM): tools: list[dict[str, BaseTool]] | None = None, available_functions: dict[str, Any] | None = None, from_task: Task | None = None, - from_agent: Agent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, ) -> str | Any: """Async call to OpenAI Responses API.""" @@ -1630,10 +1636,8 @@ class OpenAICompletion(BaseLLM): # If there are tool_calls and available_functions, execute the tools if message.tool_calls and available_functions: tool_call = 
message.tool_calls[0] - if not hasattr(tool_call, "function") or tool_call.function is None: - raise ValueError( - f"Unsupported tool call type: {type(tool_call).__name__}" - ) + if not isinstance(tool_call, ChatCompletionMessageFunctionToolCall): + return message.content function_name = tool_call.function.name try: @@ -2018,11 +2022,13 @@ class OpenAICompletion(BaseLLM): # If there are tool_calls and available_functions, execute the tools if message.tool_calls and available_functions: + from openai.types.chat.chat_completion_message_function_tool_call import ( + ChatCompletionMessageFunctionToolCall, + ) + tool_call = message.tool_calls[0] - if not hasattr(tool_call, "function") or tool_call.function is None: - raise ValueError( - f"Unsupported tool call type: {type(tool_call).__name__}" - ) + if not isinstance(tool_call, ChatCompletionMessageFunctionToolCall): + return message.content function_name = tool_call.function.name try: diff --git a/lib/crewai/src/crewai/runtime_state.py b/lib/crewai/src/crewai/runtime_state.py deleted file mode 100644 index 5e0079ae2..000000000 --- a/lib/crewai/src/crewai/runtime_state.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Unified runtime state for crewAI. - -``RuntimeState`` is a ``RootModel`` whose ``model_dump_json()`` produces a -complete, self-contained snapshot of every active entity in the program. - -The ``Entity`` type alias and ``RuntimeState`` model are built at import time -in ``crewai/__init__.py`` after all forward references are resolved. 
-""" - -from typing import Any - - -def _entity_discriminator(v: dict[str, Any] | object) -> str: - if isinstance(v, dict): - raw = v.get("entity_type", "agent") - else: - raw = getattr(v, "entity_type", "agent") - return str(raw) diff --git a/lib/crewai/src/crewai/state/__init__.py b/lib/crewai/src/crewai/state/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai/src/crewai/state/event_record.py b/lib/crewai/src/crewai/state/event_record.py new file mode 100644 index 000000000..7b8c20c5b --- /dev/null +++ b/lib/crewai/src/crewai/state/event_record.py @@ -0,0 +1,205 @@ +"""Directed record of execution events. + +Stores events as nodes with typed edges for parent/child, causal, and +sequential relationships. Provides O(1) lookups and traversal. +""" + +from __future__ import annotations + +from typing import Annotated, Any, Literal + +from pydantic import BaseModel, BeforeValidator, Field, PlainSerializer, PrivateAttr + +from crewai.events.base_events import BaseEvent +from crewai.utilities.rw_lock import RWLock + + +_event_type_map: dict[str, type[BaseEvent]] = {} + + +def _resolve_event(v: Any) -> BaseEvent: + """Validate an event value into the correct BaseEvent subclass.""" + if isinstance(v, BaseEvent): + return v + if not isinstance(v, dict): + return BaseEvent.model_validate(v) + if not _event_type_map: + _build_event_type_map() + event_type = v.get("type", "") + cls = _event_type_map.get(event_type, BaseEvent) + if cls is BaseEvent: + return BaseEvent.model_validate(v) + try: + return cls.model_validate(v) + except Exception: + return BaseEvent.model_validate(v) + + +def _build_event_type_map() -> None: + """Populate _event_type_map from all BaseEvent subclasses.""" + + def _collect(cls: type[BaseEvent]) -> None: + for sub in cls.__subclasses__(): + type_field = sub.model_fields.get("type") + if type_field and type_field.default: + _event_type_map[type_field.default] = sub + _collect(sub) + + _collect(BaseEvent) + + +EdgeType = 
Literal[ + "parent", + "child", + "trigger", + "triggered_by", + "next", + "previous", + "started", + "completed_by", +] + + +class EventNode(BaseModel): + """A node wrapping a single event with its adjacency lists.""" + + event: Annotated[ + BaseEvent, + BeforeValidator(_resolve_event), + PlainSerializer(lambda v: v.model_dump()), + ] + edges: dict[EdgeType, list[str]] = Field(default_factory=dict) + + def add_edge(self, edge_type: EdgeType, target_id: str) -> None: + """Add an edge from this node to another. + + Args: + edge_type: The relationship type. + target_id: The event_id of the target node. + """ + self.edges.setdefault(edge_type, []).append(target_id) + + def neighbors(self, edge_type: EdgeType) -> list[str]: + """Return neighbor IDs for a given edge type. + + Args: + edge_type: The relationship type to query. + + Returns: + List of event IDs connected by this edge type. + """ + return self.edges.get(edge_type, []) + + +class EventRecord(BaseModel): + """Directed record of execution events with O(1) node lookup. + + Events are added via :meth:`add` which automatically wires edges + based on the event's relationship fields — ``parent_event_id``, + ``triggered_by_event_id``, ``previous_event_id``, ``started_event_id``. + """ + + nodes: dict[str, EventNode] = Field(default_factory=dict) + _lock: RWLock = PrivateAttr(default_factory=RWLock) + + def add(self, event: BaseEvent) -> EventNode: + """Add an event to the record and wire its edges. + + Args: + event: The event to insert. + + Returns: + The created node. 
+ """ + with self._lock.w_locked(): + node = EventNode(event=event) + self.nodes[event.event_id] = node + + if event.parent_event_id and event.parent_event_id in self.nodes: + node.add_edge("parent", event.parent_event_id) + self.nodes[event.parent_event_id].add_edge("child", event.event_id) + + if ( + event.triggered_by_event_id + and event.triggered_by_event_id in self.nodes + ): + node.add_edge("triggered_by", event.triggered_by_event_id) + self.nodes[event.triggered_by_event_id].add_edge( + "trigger", event.event_id + ) + + if event.previous_event_id and event.previous_event_id in self.nodes: + node.add_edge("previous", event.previous_event_id) + self.nodes[event.previous_event_id].add_edge("next", event.event_id) + + if event.started_event_id and event.started_event_id in self.nodes: + node.add_edge("started", event.started_event_id) + self.nodes[event.started_event_id].add_edge( + "completed_by", event.event_id + ) + + return node + + def get(self, event_id: str) -> EventNode | None: + """Look up a node by event ID. + + Args: + event_id: The event's unique identifier. + + Returns: + The node, or None if not found. + """ + with self._lock.r_locked(): + return self.nodes.get(event_id) + + def descendants(self, event_id: str) -> list[EventNode]: + """Return all descendant nodes, children recursively. + + Args: + event_id: The root event ID to start from. + + Returns: + All descendant nodes in breadth-first order. 
+ """ + with self._lock.r_locked(): + result: list[EventNode] = [] + queue = [event_id] + visited: set[str] = set() + + while queue: + current_id = queue.pop(0) + if current_id in visited: + continue + visited.add(current_id) + + node = self.nodes.get(current_id) + if node is None: + continue + + for child_id in node.neighbors("child"): + if child_id not in visited: + child_node = self.nodes.get(child_id) + if child_node: + result.append(child_node) + queue.append(child_id) + + return result + + def roots(self) -> list[EventNode]: + """Return all root nodes — events with no parent. + + Returns: + List of root event nodes. + """ + with self._lock.r_locked(): + return [ + node for node in self.nodes.values() if not node.neighbors("parent") + ] + + def __len__(self) -> int: + with self._lock.r_locked(): + return len(self.nodes) + + def __contains__(self, event_id: str) -> bool: + with self._lock.r_locked(): + return event_id in self.nodes diff --git a/lib/crewai/src/crewai/state/provider/__init__.py b/lib/crewai/src/crewai/state/provider/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai/src/crewai/state/provider/core.py b/lib/crewai/src/crewai/state/provider/core.py new file mode 100644 index 000000000..ee420eea0 --- /dev/null +++ b/lib/crewai/src/crewai/state/provider/core.py @@ -0,0 +1,81 @@ +"""Base protocol for state providers.""" + +from __future__ import annotations + +from typing import Any, Protocol, runtime_checkable + +from pydantic import GetCoreSchemaHandler +from pydantic_core import CoreSchema, core_schema + + +@runtime_checkable +class BaseProvider(Protocol): + """Interface for persisting and restoring runtime state checkpoints. + + Implementations handle the storage backend — filesystem, cloud, database, + etc. — while ``RuntimeState`` handles serialization. 
+ """ + + @classmethod + def __get_pydantic_core_schema__( + cls, source_type: Any, handler: GetCoreSchemaHandler + ) -> CoreSchema: + """Allow Pydantic to validate any ``BaseProvider`` instance.""" + + def _validate(v: Any) -> BaseProvider: + if isinstance(v, BaseProvider): + return v + raise TypeError(f"Expected a BaseProvider instance, got {type(v)}") + + return core_schema.no_info_plain_validator_function( + _validate, + serialization=core_schema.plain_serializer_function_ser_schema( + lambda v: type(v).__name__, info_arg=False + ), + ) + + def checkpoint(self, data: str, directory: str) -> str: + """Persist a snapshot synchronously. + + Args: + data: The serialized string to persist. + directory: Logical destination: path, bucket prefix, etc. + + Returns: + A location identifier for the saved checkpoint, such as a file path or URI. + """ + ... + + async def acheckpoint(self, data: str, directory: str) -> str: + """Persist a snapshot asynchronously. + + Args: + data: The serialized string to persist. + directory: Logical destination: path, bucket prefix, etc. + + Returns: + A location identifier for the saved checkpoint, such as a file path or URI. + """ + ... + + def from_checkpoint(self, location: str) -> str: + """Read a snapshot synchronously. + + Args: + location: The identifier returned by a previous ``checkpoint`` call. + + Returns: + The raw serialized string. + """ + ... + + async def afrom_checkpoint(self, location: str) -> str: + """Read a snapshot asynchronously. + + Args: + location: The identifier returned by a previous ``acheckpoint`` call. + + Returns: + The raw serialized string. + """ + ... 
diff --git a/lib/crewai/src/crewai/state/provider/json_provider.py b/lib/crewai/src/crewai/state/provider/json_provider.py new file mode 100644 index 000000000..656e19fe0 --- /dev/null +++ b/lib/crewai/src/crewai/state/provider/json_provider.py @@ -0,0 +1,87 @@ +"""Filesystem JSON state provider.""" + +from __future__ import annotations + +from datetime import datetime, timezone +from pathlib import Path +import uuid + +import aiofiles +import aiofiles.os + +from crewai.state.provider.core import BaseProvider + + +class JsonProvider(BaseProvider): + """Persists runtime state checkpoints as JSON files on the local filesystem.""" + + def checkpoint(self, data: str, directory: str) -> str: + """Write a JSON checkpoint file to the directory. + + Args: + data: The serialized JSON string to persist. + directory: Filesystem path where the checkpoint will be saved. + + Returns: + The path to the written checkpoint file. + """ + file_path = _build_path(directory) + file_path.parent.mkdir(parents=True, exist_ok=True) + + with open(file_path, "w") as f: + f.write(data) + return str(file_path) + + async def acheckpoint(self, data: str, directory: str) -> str: + """Write a JSON checkpoint file to the directory asynchronously. + + Args: + data: The serialized JSON string to persist. + directory: Filesystem path where the checkpoint will be saved. + + Returns: + The path to the written checkpoint file. + """ + file_path = _build_path(directory) + await aiofiles.os.makedirs(str(file_path.parent), exist_ok=True) + + async with aiofiles.open(file_path, "w") as f: + await f.write(data) + return str(file_path) + + def from_checkpoint(self, location: str) -> str: + """Read a JSON checkpoint file. + + Args: + location: Filesystem path to the checkpoint file. + + Returns: + The raw JSON string. + """ + return Path(location).read_text() + + async def afrom_checkpoint(self, location: str) -> str: + """Read a JSON checkpoint file asynchronously. 
+ + Args: + location: Filesystem path to the checkpoint file. + + Returns: + The raw JSON string. + """ + async with aiofiles.open(location) as f: + return await f.read() + + +def _build_path(directory: str) -> Path: + """Build a timestamped checkpoint file path. + + Args: + directory: Parent directory for the checkpoint file. + + Returns: + The target file path. + """ + ts = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%S") + filename = f"{ts}_{uuid.uuid4().hex[:8]}.json" + return Path(directory) / filename diff --git a/lib/crewai/src/crewai/state/runtime.py b/lib/crewai/src/crewai/state/runtime.py new file mode 100644 index 000000000..a5bb6bd8d --- /dev/null +++ b/lib/crewai/src/crewai/state/runtime.py @@ -0,0 +1,160 @@ +"""Unified runtime state for crewAI. + +``RuntimeState`` is a ``RootModel`` whose ``model_dump_json()`` produces a +complete, self-contained snapshot of every active entity in the program. + +The ``Entity`` type is resolved at import time in ``crewai/__init__.py`` +via ``RuntimeState.model_rebuild()``. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from pydantic import ( + ModelWrapValidatorHandler, + PrivateAttr, + RootModel, + model_serializer, + model_validator, +) + +from crewai.context import capture_execution_context +from crewai.state.event_record import EventRecord +from crewai.state.provider.core import BaseProvider +from crewai.state.provider.json_provider import JsonProvider + + +if TYPE_CHECKING: + from crewai import Entity + + +def _sync_checkpoint_fields(entity: object) -> None: + """Copy private runtime attrs into checkpoint fields before serializing. + + Args: + entity: The entity whose private runtime attributes will be + copied into its public checkpoint fields. 
+ """ + from crewai.crew import Crew + from crewai.flow.flow import Flow + + if isinstance(entity, Flow): + entity.checkpoint_completed_methods = ( + set(entity._completed_methods) if entity._completed_methods else None + ) + entity.checkpoint_method_outputs = ( + list(entity._method_outputs) if entity._method_outputs else None + ) + entity.checkpoint_method_counts = ( + {str(k): v for k, v in entity._method_execution_counts.items()} + if entity._method_execution_counts + else None + ) + entity.checkpoint_state = ( + entity._copy_and_serialize_state() if entity._state is not None else None + ) + if isinstance(entity, Crew): + entity.checkpoint_inputs = entity._inputs + entity.checkpoint_train = entity._train + entity.checkpoint_kickoff_event_id = entity._kickoff_event_id + + +class RuntimeState(RootModel): # type: ignore[type-arg] + root: list[Entity] + _provider: BaseProvider = PrivateAttr(default_factory=JsonProvider) + _event_record: EventRecord = PrivateAttr(default_factory=EventRecord) + + @property + def event_record(self) -> EventRecord: + """The execution event record.""" + return self._event_record + + @model_serializer(mode="plain") + def _serialize(self) -> dict[str, Any]: + return { + "entities": [e.model_dump(mode="json") for e in self.root], + "event_record": self._event_record.model_dump(), + } + + @model_validator(mode="wrap") + @classmethod + def _deserialize( + cls, data: Any, handler: ModelWrapValidatorHandler[RuntimeState] + ) -> RuntimeState: + if isinstance(data, dict) and "entities" in data: + record_data = data.get("event_record") + state = handler(data["entities"]) + if record_data: + state._event_record = EventRecord.model_validate(record_data) + return state + return handler(data) + + def checkpoint(self, directory: str) -> str: + """Write a checkpoint file to the directory. + + Args: + directory: Filesystem path where the checkpoint JSON will be saved. + + Returns: + A location identifier for the saved checkpoint. 
+ """ + _prepare_entities(self.root) + return self._provider.checkpoint(self.model_dump_json(), directory) + + async def acheckpoint(self, directory: str) -> str: + """Async version of :meth:`checkpoint`. + + Args: + directory: Filesystem path where the checkpoint JSON will be saved. + + Returns: + A location identifier for the saved checkpoint. + """ + _prepare_entities(self.root) + return await self._provider.acheckpoint(self.model_dump_json(), directory) + + @classmethod + def from_checkpoint( + cls, location: str, provider: BaseProvider, **kwargs: Any + ) -> RuntimeState: + """Restore a RuntimeState from a checkpoint. + + Args: + location: The identifier returned by a previous ``checkpoint`` call. + provider: The storage backend to read from. + **kwargs: Passed to ``model_validate_json``. + + Returns: + A restored RuntimeState. + """ + raw = provider.from_checkpoint(location) + return cls.model_validate_json(raw, **kwargs) + + @classmethod + async def afrom_checkpoint( + cls, location: str, provider: BaseProvider, **kwargs: Any + ) -> RuntimeState: + """Async version of :meth:`from_checkpoint`. + + Args: + location: The identifier returned by a previous ``acheckpoint`` call. + provider: The storage backend to read from. + **kwargs: Passed to ``model_validate_json``. + + Returns: + A restored RuntimeState. + """ + raw = await provider.afrom_checkpoint(location) + return cls.model_validate_json(raw, **kwargs) + + +def _prepare_entities(root: list[Entity]) -> None: + """Capture execution context and sync checkpoint fields on each entity. + + Args: + root: List of entities to prepare for serialization. 
+ """ + for entity in root: + entity.execution_context = capture_execution_context() + _sync_checkpoint_fields(entity) diff --git a/lib/crewai/src/crewai/task.py b/lib/crewai/src/crewai/task.py index 7cd0bdca5..73e49ade9 100644 --- a/lib/crewai/src/crewai/task.py +++ b/lib/crewai/src/crewai/task.py @@ -598,7 +598,10 @@ class Task(BaseModel): tools = tools or self.tools or [] self.processed_by_agents.add(agent.role) - crewai_event_bus.emit(self, TaskStartedEvent(context=context, task=self)) + if not (agent.agent_executor and agent.agent_executor._resuming): + crewai_event_bus.emit( + self, TaskStartedEvent(context=context, task=self) + ) result = await agent.aexecute_task( task=self, context=context, @@ -717,7 +720,10 @@ class Task(BaseModel): tools = tools or self.tools or [] self.processed_by_agents.add(agent.role) - crewai_event_bus.emit(self, TaskStartedEvent(context=context, task=self)) + if not (agent.agent_executor and agent.agent_executor._resuming): + crewai_event_bus.emit( + self, TaskStartedEvent(context=context, task=self) + ) result = agent.execute_task( task=self, context=context, diff --git a/lib/crewai/src/crewai/tools/base_tool.py b/lib/crewai/src/crewai/tools/base_tool.py index 118fa307b..11f88a768 100644 --- a/lib/crewai/src/crewai/tools/base_tool.py +++ b/lib/crewai/src/crewai/tools/base_tool.py @@ -3,10 +3,12 @@ from __future__ import annotations from abc import ABC, abstractmethod import asyncio from collections.abc import Awaitable, Callable +import importlib from inspect import Parameter, signature import json import threading from typing import ( + Annotated, Any, Generic, ParamSpec, @@ -19,13 +21,23 @@ from pydantic import ( BaseModel as PydanticBaseModel, ConfigDict, Field, + GetCoreSchemaHandler, + PlainSerializer, PrivateAttr, + computed_field, create_model, field_validator, ) +from pydantic_core import CoreSchema, core_schema from typing_extensions import TypeIs -from crewai.tools.structured_tool import CrewStructuredTool, 
build_schema_hint +from crewai.tools.structured_tool import ( + CrewStructuredTool, + _deserialize_schema, + _serialize_schema, + build_schema_hint, +) +from crewai.types.callback import SerializableCallable, _resolve_dotted_path from crewai.utilities.printer import Printer from crewai.utilities.pydantic_schema_utils import generate_model_description from crewai.utilities.string_utils import sanitize_tool_name @@ -36,6 +48,42 @@ _printer = Printer() P = ParamSpec("P") R = TypeVar("R", covariant=True) +# Registry populated by BaseTool.__init_subclass__; used for checkpoint +# deserialization so that list[BaseTool] fields resolve the concrete class. +_TOOL_TYPE_REGISTRY: dict[str, type] = {} + +# Sentinel set after BaseTool is defined so __get_pydantic_core_schema__ +# can distinguish the base class from subclasses despite +# ``from __future__ import annotations``. +_BASE_TOOL_CLS: type | None = None + + +def _resolve_tool_dict(value: dict[str, Any]) -> Any: + """Validate a dict with ``tool_type`` into the concrete BaseTool subclass.""" + dotted = value.get("tool_type", "") + tool_cls = _TOOL_TYPE_REGISTRY.get(dotted) + if tool_cls is None: + mod_path, cls_name = dotted.rsplit(".", 1) + tool_cls = getattr(importlib.import_module(mod_path), cls_name) + + # Pre-resolve serialized callback strings so SerializableCallable's + # BeforeValidator sees a callable and skips the env-var guard. 
+ data = dict(value) + for key in ("cache_function",): + val = data.get(key) + if isinstance(val, str): + try: + data[key] = _resolve_dotted_path(val) + except (ValueError, ImportError): + data.pop(key) + + return tool_cls.model_validate(data) # type: ignore[union-attr] + + +def _default_cache_function(_args: Any = None, _result: Any = None) -> bool: + """Default cache function that always allows caching.""" + return True + def _is_async_callable(func: Callable[..., Any]) -> bool: """Check if a callable is async.""" @@ -60,6 +108,36 @@ class BaseTool(BaseModel, ABC): model_config = ConfigDict(arbitrary_types_allowed=True) + def __init_subclass__(cls, **kwargs: Any) -> None: + super().__init_subclass__(**kwargs) + key = f"{cls.__module__}.{cls.__qualname__}" + _TOOL_TYPE_REGISTRY[key] = cls + + @classmethod + def __get_pydantic_core_schema__( + cls, source_type: Any, handler: GetCoreSchemaHandler + ) -> CoreSchema: + default_schema = handler(source_type) + if cls is not _BASE_TOOL_CLS: + return default_schema + + def _validate_tool(value: Any, nxt: Any) -> Any: + if isinstance(value, _BASE_TOOL_CLS): + return value + if isinstance(value, dict) and "tool_type" in value: + return _resolve_tool_dict(value) + return nxt(value) + + return core_schema.no_info_wrap_validator_function( + _validate_tool, + default_schema, + serialization=core_schema.plain_serializer_function_ser_schema( + lambda v: v.model_dump(mode="json"), + info_arg=False, + when_used="json", + ), + ) + name: str = Field( description="The unique name of the tool that clearly communicates its purpose." 
) @@ -70,7 +148,10 @@ class BaseTool(BaseModel, ABC): default_factory=list, description="List of environment variables used by the tool.", ) - args_schema: type[PydanticBaseModel] = Field( + args_schema: Annotated[ + type[PydanticBaseModel], + PlainSerializer(_serialize_schema, return_type=dict | None, when_used="json"), + ] = Field( default=_ArgsSchemaPlaceholder, validate_default=True, description="The schema for the arguments that the tool accepts.", @@ -80,8 +161,8 @@ class BaseTool(BaseModel, ABC): default=False, description="Flag to check if the description has been updated." ) - cache_function: Callable[..., bool] = Field( - default=lambda _args=None, _result=None: True, + cache_function: SerializableCallable = Field( + default=_default_cache_function, description="Function that will be used to determine if the tool should be cached, should return a boolean. If None, the tool will be cached.", ) result_as_answer: bool = Field( @@ -98,12 +179,24 @@ class BaseTool(BaseModel, ABC): ) _usage_lock: threading.Lock = PrivateAttr(default_factory=threading.Lock) + @computed_field # type: ignore[prop-decorator] + @property + def tool_type(self) -> str: + cls = type(self) + return f"{cls.__module__}.{cls.__qualname__}" + @field_validator("args_schema", mode="before") @classmethod def _default_args_schema( - cls, v: type[PydanticBaseModel] + cls, v: type[PydanticBaseModel] | dict[str, Any] | None ) -> type[PydanticBaseModel]: - if v != cls._ArgsSchemaPlaceholder: + if isinstance(v, dict): + restored = _deserialize_schema(v) + if restored is not None: + return restored + if v is None or v == cls._ArgsSchemaPlaceholder: + pass # fall through to generate from signature + elif isinstance(v, type): return v run_sig = signature(cls._run) @@ -365,6 +458,9 @@ class BaseTool(BaseModel, ABC): ) +_BASE_TOOL_CLS = BaseTool + + class Tool(BaseTool, Generic[P, R]): """Tool that wraps a callable function. 
diff --git a/lib/crewai/src/crewai/tools/structured_tool.py b/lib/crewai/src/crewai/tools/structured_tool.py index 60a457f3b..b301a9eed 100644 --- a/lib/crewai/src/crewai/tools/structured_tool.py +++ b/lib/crewai/src/crewai/tools/structured_tool.py @@ -5,16 +5,39 @@ from collections.abc import Callable import inspect import json import textwrap -from typing import TYPE_CHECKING, Any, get_type_hints +from typing import TYPE_CHECKING, Annotated, Any, get_type_hints -from pydantic import BaseModel, Field, create_model +from pydantic import ( + BaseModel, + BeforeValidator, + ConfigDict, + Field, + PlainSerializer, + PrivateAttr, + create_model, + model_validator, +) +from typing_extensions import Self from crewai.utilities.logger import Logger +from crewai.utilities.pydantic_schema_utils import create_model_from_schema from crewai.utilities.string_utils import sanitize_tool_name +def _serialize_schema(v: type[BaseModel] | None) -> dict[str, Any] | None: + return v.model_json_schema() if v else None + + +def _deserialize_schema(v: Any) -> type[BaseModel] | None: + if v is None or isinstance(v, type): + return v + if isinstance(v, dict): + return create_model_from_schema(v) + return None + + if TYPE_CHECKING: - from crewai.tools.base_tool import BaseTool + pass def build_schema_hint(args_schema: type[BaseModel]) -> str: @@ -42,49 +65,35 @@ class ToolUsageLimitExceededError(Exception): """Exception raised when a tool has reached its maximum usage limit.""" -class CrewStructuredTool: +class CrewStructuredTool(BaseModel): """A structured tool that can operate on any number of inputs. This tool intends to replace StructuredTool with a custom implementation that integrates better with CrewAI's ecosystem. 
""" - def __init__( - self, - name: str, - description: str, - args_schema: type[BaseModel], - func: Callable[..., Any], - result_as_answer: bool = False, - max_usage_count: int | None = None, - current_usage_count: int = 0, - cache_function: Callable[..., bool] | None = None, - ) -> None: - """Initialize the structured tool. + model_config = ConfigDict(arbitrary_types_allowed=True) - Args: - name: The name of the tool - description: A description of what the tool does - args_schema: The pydantic model for the tool's arguments - func: The function to run when the tool is called - result_as_answer: Whether to return the output directly - max_usage_count: Maximum number of times this tool can be used. None means unlimited usage. - current_usage_count: Current number of times this tool has been used. - cache_function: Function to determine if the tool result should be cached. - """ - self.name = name - self.description = description - self.args_schema = args_schema - self.func = func - self._logger = Logger() - self.result_as_answer = result_as_answer - self.max_usage_count = max_usage_count - self.current_usage_count = current_usage_count - self.cache_function = cache_function - self._original_tool: BaseTool | None = None + name: str = Field(default="") + description: str = Field(default="") + args_schema: Annotated[ + type[BaseModel] | None, + BeforeValidator(_deserialize_schema), + PlainSerializer(_serialize_schema), + ] = Field(default=None) + func: Any = Field(default=None, exclude=True) + result_as_answer: bool = Field(default=False) + max_usage_count: int | None = Field(default=None) + current_usage_count: int = Field(default=0) + cache_function: Any = Field(default=None, exclude=True) + _logger: Logger = PrivateAttr(default_factory=Logger) + _original_tool: Any = PrivateAttr(default=None) - # Validate the function signature matches the schema - self._validate_function_signature() + @model_validator(mode="after") + def _validate_func(self) -> Self: + if 
self.func is not None: + self._validate_function_signature() + return self @classmethod def from_function( @@ -189,6 +198,8 @@ class CrewStructuredTool: def _validate_function_signature(self) -> None: """Validate that the function signature matches the args schema.""" + if not self.args_schema: + return sig = inspect.signature(self.func) schema_fields = self.args_schema.model_fields @@ -228,9 +239,11 @@ class CrewStructuredTool: except json.JSONDecodeError as e: raise ValueError(f"Failed to parse arguments as JSON: {e}") from e + if not self.args_schema: + return raw_args if isinstance(raw_args, dict) else {} try: validated_args = self.args_schema.model_validate(raw_args) - return validated_args.model_dump() + return dict(validated_args.model_dump()) except Exception as e: hint = build_schema_hint(self.args_schema) raise ValueError(f"Arguments validation failed: {e}{hint}") from e @@ -275,6 +288,8 @@ class CrewStructuredTool: def _run(self, *args: Any, **kwargs: Any) -> Any: """Legacy method for compatibility.""" # Convert args/kwargs to our expected format + if not self.args_schema: + return self.func(*args, **kwargs) input_dict = dict(zip(self.args_schema.model_fields.keys(), args, strict=False)) input_dict.update(kwargs) return self.invoke(input_dict) @@ -321,6 +336,8 @@ class CrewStructuredTool: @property def args(self) -> dict[str, Any]: """Get the tool's input arguments schema.""" + if not self.args_schema: + return {} schema: dict[str, Any] = self.args_schema.model_json_schema()["properties"] return schema diff --git a/lib/crewai/src/crewai/utilities/agent_utils.py b/lib/crewai/src/crewai/utilities/agent_utils.py index c1a341c39..09c570fac 100644 --- a/lib/crewai/src/crewai/utilities/agent_utils.py +++ b/lib/crewai/src/crewai/utilities/agent_utils.py @@ -40,7 +40,7 @@ from crewai.utilities.types import LLMMessage if TYPE_CHECKING: - from crewai.agent import Agent + from crewai.agents.agent_builder.base_agent import BaseAgent from 
crewai.agents.crew_agent_executor import CrewAgentExecutor from crewai.agents.tools_handler import ToolsHandler from crewai.experimental.agent_executor import AgentExecutor @@ -431,7 +431,7 @@ def get_llm_response( tools: list[dict[str, Any]] | None = None, available_functions: dict[str, Callable[..., Any]] | None = None, from_task: Task | None = None, - from_agent: Agent | LiteAgent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None = None, verbose: bool = True, @@ -468,7 +468,7 @@ def get_llm_response( callbacks=callbacks, available_functions=available_functions, from_task=from_task, - from_agent=from_agent, # type: ignore[arg-type] + from_agent=from_agent, response_model=response_model, ) except Exception as e: @@ -487,7 +487,7 @@ async def aget_llm_response( tools: list[dict[str, Any]] | None = None, available_functions: dict[str, Callable[..., Any]] | None = None, from_task: Task | None = None, - from_agent: Agent | LiteAgent | None = None, + from_agent: BaseAgent | None = None, response_model: type[BaseModel] | None = None, executor_context: CrewAgentExecutor | AgentExecutor | None = None, verbose: bool = True, @@ -524,7 +524,7 @@ async def aget_llm_response( callbacks=callbacks, available_functions=available_functions, from_task=from_task, - from_agent=from_agent, # type: ignore[arg-type] + from_agent=from_agent, response_model=response_model, ) except Exception as e: @@ -1363,7 +1363,7 @@ def execute_single_native_tool_call( original_tools: list[BaseTool], structured_tools: list[CrewStructuredTool] | None, tools_handler: ToolsHandler | None, - agent: Agent | None, + agent: BaseAgent | None, task: Task | None, crew: Any | None, event_source: Any, diff --git a/lib/crewai/src/crewai/utilities/prompts.py b/lib/crewai/src/crewai/utilities/prompts.py index e88a9708a..821623b89 100644 --- a/lib/crewai/src/crewai/utilities/prompts.py +++ 
b/lib/crewai/src/crewai/utilities/prompts.py @@ -2,25 +2,33 @@ from __future__ import annotations -from typing import Annotated, Any, Literal +from typing import Any, Literal from pydantic import BaseModel, Field -from typing_extensions import TypedDict from crewai.utilities.i18n import I18N, get_i18n -class StandardPromptResult(TypedDict): +class StandardPromptResult(BaseModel): """Result with only prompt field for standard mode.""" - prompt: Annotated[str, "The generated prompt string"] + prompt: str = Field(default="") + + def get(self, key: str, default: Any = None) -> Any: + return getattr(self, key, default) + + def __getitem__(self, key: str) -> Any: + return getattr(self, key) + + def __contains__(self, key: str) -> bool: + return hasattr(self, key) and getattr(self, key) is not None class SystemPromptResult(StandardPromptResult): """Result with system, user, and prompt fields for system prompt mode.""" - system: Annotated[str, "The system prompt component"] - user: Annotated[str, "The user prompt component"] + system: str = Field(default="") + user: str = Field(default="") COMPONENTS = Literal[ diff --git a/lib/crewai/src/crewai/utilities/streaming.py b/lib/crewai/src/crewai/utilities/streaming.py index 5db09ba9c..dd0992684 100644 --- a/lib/crewai/src/crewai/utilities/streaming.py +++ b/lib/crewai/src/crewai/utilities/streaming.py @@ -142,8 +142,8 @@ def _unregister_handler(handler: Callable[[Any, BaseEvent], None]) -> None: handler: The handler function to unregister. 
""" with crewai_event_bus._rwlock.w_locked(): - handlers: frozenset[Callable[[Any, BaseEvent], None]] = ( - crewai_event_bus._sync_handlers.get(LLMStreamChunkEvent, frozenset()) + handlers: frozenset[Callable[..., None]] = crewai_event_bus._sync_handlers.get( + LLMStreamChunkEvent, frozenset() ) crewai_event_bus._sync_handlers[LLMStreamChunkEvent] = handlers - {handler} diff --git a/lib/crewai/src/crewai/utilities/token_counter_callback.py b/lib/crewai/src/crewai/utilities/token_counter_callback.py index 9c3a5cc5f..d64e5b2f0 100644 --- a/lib/crewai/src/crewai/utilities/token_counter_callback.py +++ b/lib/crewai/src/crewai/utilities/token_counter_callback.py @@ -7,6 +7,8 @@ when available (for the litellm fallback path). from typing import Any +from pydantic import BaseModel, Field + from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess from crewai.utilities.logger_utils import suppress_warnings @@ -21,35 +23,26 @@ except ImportError: LITELLM_AVAILABLE = False -# Create a base class that conditionally inherits from litellm's CustomLogger -# when available, or from object when not available -if LITELLM_AVAILABLE and LiteLLMCustomLogger is not None: - _BaseClass: type = LiteLLMCustomLogger -else: - _BaseClass = object - - -class TokenCalcHandler(_BaseClass): # type: ignore[misc] +class TokenCalcHandler(BaseModel): """Handler for calculating and tracking token usage in LLM calls. This handler tracks prompt tokens, completion tokens, and cached tokens across requests. It works standalone and also integrates with litellm's logging system when litellm is installed (for the fallback path). - - Attributes: - token_cost_process: The token process tracker to accumulate usage metrics. """ - def __init__(self, token_cost_process: TokenProcess | None, **kwargs: Any) -> None: - """Initialize the token calculation handler. 
+ model_config = {"arbitrary_types_allowed": True} - Args: - token_cost_process: Optional token process tracker for accumulating metrics. - """ - # Only call super().__init__ if we have a real parent class with __init__ - if LITELLM_AVAILABLE and LiteLLMCustomLogger is not None: - super().__init__(**kwargs) - self.token_cost_process = token_cost_process + __hash__ = object.__hash__ + + token_cost_process: TokenProcess | None = Field(default=None) + + def __init__( + self, token_cost_process: TokenProcess | None = None, /, **kwargs: Any + ) -> None: + if token_cost_process is not None: + kwargs["token_cost_process"] = token_cost_process + super().__init__(**kwargs) def log_success_event( self, @@ -58,18 +51,7 @@ class TokenCalcHandler(_BaseClass): # type: ignore[misc] start_time: float, end_time: float, ) -> None: - """Log successful LLM API call and track token usage. - - This method has the same interface as litellm's CustomLogger.log_success_event() - so it can be used as a litellm callback when litellm is installed, or called - directly when litellm is not installed. - - Args: - kwargs: The arguments passed to the LLM call. - response_obj: The response object from the LLM API. - start_time: The timestamp when the call started. - end_time: The timestamp when the call completed. 
- """ + """Log successful LLM API call and track token usage.""" if self.token_cost_process is None: return diff --git a/lib/crewai/tests/agents/test_async_agent_executor.py b/lib/crewai/tests/agents/test_async_agent_executor.py index 01297bdcc..0ed37d824 100644 --- a/lib/crewai/tests/agents/test_async_agent_executor.py +++ b/lib/crewai/tests/agents/test_async_agent_executor.py @@ -6,68 +6,65 @@ from unittest.mock import AsyncMock, MagicMock, Mock, patch import pytest +from crewai.agent import Agent from crewai.agents.crew_agent_executor import CrewAgentExecutor from crewai.agents.parser import AgentAction, AgentFinish +from crewai.agents.tools_handler import ToolsHandler +from crewai.llms.base_llm import BaseLLM +from crewai.task import Task from crewai.tools.tool_types import ToolResult @pytest.fixture def mock_llm() -> MagicMock: """Create a mock LLM for testing.""" - llm = MagicMock() + llm = MagicMock(spec=BaseLLM) llm.supports_stop_words.return_value = True llm.stop = [] return llm @pytest.fixture -def mock_agent() -> MagicMock: - """Create a mock agent for testing.""" - agent = MagicMock() - agent.role = "Test Agent" - agent.key = "test_agent_key" - agent.verbose = False - agent.id = "test_agent_id" - return agent +def test_agent(mock_llm: MagicMock) -> Agent: + """Create a real Agent for testing.""" + return Agent( + role="Test Agent", + goal="Test goal", + backstory="Test backstory", + llm=mock_llm, + verbose=False, + ) @pytest.fixture -def mock_task() -> MagicMock: - """Create a mock task for testing.""" - task = MagicMock() - task.description = "Test task description" - return task - - -@pytest.fixture -def mock_crew() -> MagicMock: - """Create a mock crew for testing.""" - crew = MagicMock() - crew.verbose = False - crew._train = False - return crew +def test_task(test_agent: Agent) -> Task: + """Create a real Task for testing.""" + return Task( + description="Test task description", + expected_output="Test output", + agent=test_agent, + ) 
@pytest.fixture def mock_tools_handler() -> MagicMock: """Create a mock tools handler.""" - return MagicMock() + return MagicMock(spec=ToolsHandler) @pytest.fixture def executor( mock_llm: MagicMock, - mock_agent: MagicMock, - mock_task: MagicMock, - mock_crew: MagicMock, + test_agent: Agent, + test_task: Task, mock_tools_handler: MagicMock, ) -> CrewAgentExecutor: """Create a CrewAgentExecutor instance for testing.""" return CrewAgentExecutor( llm=mock_llm, - task=mock_task, - crew=mock_crew, - agent=mock_agent, + task=test_task, + crew=None, + agent=test_agent, prompt={"prompt": "Test prompt {input} {tool_names} {tools}"}, max_iter=5, tools=[], @@ -229,8 +226,8 @@ class TestAsyncAgentExecutor: @pytest.mark.asyncio async def test_concurrent_ainvoke_calls( - self, mock_llm: MagicMock, mock_agent: MagicMock, mock_task: MagicMock, - mock_crew: MagicMock, mock_tools_handler: MagicMock + self, mock_llm: MagicMock, test_agent: Agent, test_task: Task, + mock_tools_handler: MagicMock, ) -> None: """Test that multiple ainvoke calls can run concurrently.""" max_concurrent = 0 @@ -242,9 +239,9 @@ class TestAsyncAgentExecutor: executor = CrewAgentExecutor( llm=mock_llm, - task=mock_task, - crew=mock_crew, - agent=mock_agent, + task=test_task, + crew=None, + agent=test_agent, prompt={"prompt": "Test {input} {tool_names} {tools}"}, max_iter=5, tools=[], diff --git a/lib/crewai/tests/agents/test_native_tool_calling.py b/lib/crewai/tests/agents/test_native_tool_calling.py index 73a2c5156..5cc218fa2 100644 --- a/lib/crewai/tests/agents/test_native_tool_calling.py +++ b/lib/crewai/tests/agents/test_native_tool_calling.py @@ -1158,16 +1158,12 @@ class TestNativeToolCallingJsonParseError: mock_task.description = "test" mock_task.id = "test-id" - executor = object.__new__(CrewAgentExecutor) + executor = CrewAgentExecutor( + tools=structured_tools, + original_tools=tools, + ) executor.agent = mock_agent executor.task = mock_task - executor.crew = Mock() - executor.tools = 
structured_tools - executor.original_tools = tools - executor.tools_handler = None - executor._printer = Mock() - executor.messages = [] - return executor def test_malformed_json_returns_parse_error(self) -> None: diff --git a/lib/crewai/tests/memory/test_memory_root_scope.py b/lib/crewai/tests/memory/test_memory_root_scope.py index 8b0c382af..8872a9e09 100644 --- a/lib/crewai/tests/memory/test_memory_root_scope.py +++ b/lib/crewai/tests/memory/test_memory_root_scope.py @@ -523,11 +523,10 @@ class TestAgentScopeExtension: def test_agent_save_extends_crew_root_scope(self) -> None: """Agent._save_to_memory extends crew's root_scope with agent info.""" - from crewai.agents.agent_builder.base_agent_executor_mixin import ( - CrewAgentExecutorMixin, + from crewai.agents.agent_builder.base_agent_executor import ( + BaseAgentExecutor, ) from crewai.agents.parser import AgentFinish - from crewai.utilities.printer import Printer mock_memory = MagicMock() mock_memory.read_only = False @@ -543,17 +542,10 @@ class TestAgentScopeExtension: mock_task.description = "Research task" mock_task.expected_output = "Report" - class MinimalExecutor(CrewAgentExecutorMixin): - crew = None - agent = mock_agent - task = mock_task - iterations = 0 - max_iter = 1 - messages = [] - _i18n = MagicMock() - _printer = Printer() + executor = BaseAgentExecutor() + executor.agent = mock_agent + executor.task = mock_task - executor = MinimalExecutor() executor._save_to_memory(AgentFinish(thought="", output="Result", text="Result")) mock_memory.remember_many.assert_called_once() @@ -562,11 +554,10 @@ class TestAgentScopeExtension: def test_agent_save_sanitizes_role(self) -> None: """Agent role with special chars is sanitized for scope path.""" - from crewai.agents.agent_builder.base_agent_executor_mixin import ( - CrewAgentExecutorMixin, + from crewai.agents.agent_builder.base_agent_executor import ( + BaseAgentExecutor, ) from crewai.agents.parser import AgentFinish - from crewai.utilities.printer 
import Printer mock_memory = MagicMock() mock_memory.read_only = False @@ -582,17 +573,10 @@ class TestAgentScopeExtension: mock_task.description = "Task" mock_task.expected_output = "Output" - class MinimalExecutor(CrewAgentExecutorMixin): - crew = None - agent = mock_agent - task = mock_task - iterations = 0 - max_iter = 1 - messages = [] - _i18n = MagicMock() - _printer = Printer() + executor = BaseAgentExecutor() + executor.agent = mock_agent + executor.task = mock_task - executor = MinimalExecutor() executor._save_to_memory(AgentFinish(thought="", output="R", text="R")) call_kwargs = mock_memory.remember_many.call_args.kwargs @@ -1057,11 +1041,10 @@ class TestAgentExecutorBackwardCompat: def test_agent_executor_no_root_scope_when_memory_has_none(self) -> None: """Agent executor doesn't inject root_scope when memory has none.""" - from crewai.agents.agent_builder.base_agent_executor_mixin import ( - CrewAgentExecutorMixin, + from crewai.agents.agent_builder.base_agent_executor import ( + BaseAgentExecutor, ) from crewai.agents.parser import AgentFinish - from crewai.utilities.printer import Printer mock_memory = MagicMock() mock_memory.read_only = False @@ -1077,17 +1060,10 @@ class TestAgentExecutorBackwardCompat: mock_task.description = "Task" mock_task.expected_output = "Output" - class MinimalExecutor(CrewAgentExecutorMixin): - crew = None - agent = mock_agent - task = mock_task - iterations = 0 - max_iter = 1 - messages = [] - _i18n = MagicMock() - _printer = Printer() + executor = BaseAgentExecutor() + executor.agent = mock_agent + executor.task = mock_task - executor = MinimalExecutor() executor._save_to_memory(AgentFinish(thought="", output="R", text="R")) # Should NOT pass root_scope when memory has none @@ -1097,11 +1073,10 @@ class TestAgentExecutorBackwardCompat: def test_agent_executor_extends_root_scope_when_memory_has_one(self) -> None: """Agent executor extends root_scope when memory has one.""" - from 
crewai.agents.agent_builder.base_agent_executor_mixin import ( - CrewAgentExecutorMixin, + from crewai.agents.agent_builder.base_agent_executor import ( + BaseAgentExecutor, ) from crewai.agents.parser import AgentFinish - from crewai.utilities.printer import Printer mock_memory = MagicMock() mock_memory.read_only = False @@ -1117,17 +1092,10 @@ class TestAgentExecutorBackwardCompat: mock_task.description = "Task" mock_task.expected_output = "Output" - class MinimalExecutor(CrewAgentExecutorMixin): - crew = None - agent = mock_agent - task = mock_task - iterations = 0 - max_iter = 1 - messages = [] - _i18n = MagicMock() - _printer = Printer() + executor = BaseAgentExecutor() + executor.agent = mock_agent + executor.task = mock_task - executor = MinimalExecutor() executor._save_to_memory(AgentFinish(thought="", output="R", text="R")) # Should pass extended root_scope diff --git a/lib/crewai/tests/memory/test_unified_memory.py b/lib/crewai/tests/memory/test_unified_memory.py index f36bf0c2b..05bb977ac 100644 --- a/lib/crewai/tests/memory/test_unified_memory.py +++ b/lib/crewai/tests/memory/test_unified_memory.py @@ -351,7 +351,7 @@ def test_memory_extract_memories_empty_content_returns_empty_list(tmp_path: Path def test_executor_save_to_memory_calls_extract_then_remember_per_item() -> None: """_save_to_memory calls memory.extract_memories(raw) then memory.remember(m) for each.""" - from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin + from crewai.agents.agent_builder.base_agent_executor import BaseAgentExecutor from crewai.agents.parser import AgentFinish mock_memory = MagicMock() @@ -367,17 +367,9 @@ def test_executor_save_to_memory_calls_extract_then_remember_per_item() -> None: mock_task.description = "Do research" mock_task.expected_output = "A report" - class MinimalExecutor(CrewAgentExecutorMixin): - crew = None - agent = mock_agent - task = mock_task - iterations = 0 - max_iter = 1 - messages = [] - _i18n = MagicMock() - 
_printer = Printer() - - executor = MinimalExecutor() + executor = BaseAgentExecutor() + executor.agent = mock_agent + executor.task = mock_task executor._save_to_memory( AgentFinish(thought="", output="We found X and Y.", text="We found X and Y.") ) @@ -391,7 +383,7 @@ def test_executor_save_to_memory_calls_extract_then_remember_per_item() -> None: def test_executor_save_to_memory_skips_delegation_output() -> None: """_save_to_memory does nothing when output contains delegate action.""" - from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin + from crewai.agents.agent_builder.base_agent_executor import BaseAgentExecutor from crewai.agents.parser import AgentFinish from crewai.utilities.string_utils import sanitize_tool_name @@ -400,21 +392,15 @@ def test_executor_save_to_memory_skips_delegation_output() -> None: mock_agent = MagicMock() mock_agent.memory = mock_memory mock_agent._logger = MagicMock() - mock_task = MagicMock(description="Task", expected_output="Out") - - class MinimalExecutor(CrewAgentExecutorMixin): - crew = None - agent = mock_agent - task = mock_task - iterations = 0 - max_iter = 1 - messages = [] - _i18n = MagicMock() - _printer = Printer() + mock_task = MagicMock() + mock_task.description = "Task" + mock_task.expected_output = "Out" delegate_text = f"Action: {sanitize_tool_name('Delegate work to coworker')}" full_text = delegate_text + " rest" - executor = MinimalExecutor() + executor = BaseAgentExecutor() + executor.agent = mock_agent + executor.task = mock_task executor._save_to_memory( AgentFinish(thought="", output=full_text, text=full_text) ) diff --git a/lib/crewai/tests/rag/embeddings/test_google_vertex_memory_integration.py b/lib/crewai/tests/rag/embeddings/test_google_vertex_memory_integration.py index 149320adf..28ea84304 100644 --- a/lib/crewai/tests/rag/embeddings/test_google_vertex_memory_integration.py +++ b/lib/crewai/tests/rag/embeddings/test_google_vertex_memory_integration.py @@ -102,7 
+102,7 @@ def test_crew_memory_with_google_vertex_embedder( # Mock _save_to_memory during kickoff so it doesn't make embedding API calls # that VCR can't replay (GCP metadata auth, embedding endpoints). with patch( - "crewai.agents.agent_builder.base_agent_executor_mixin.CrewAgentExecutorMixin._save_to_memory" + "crewai.agents.agent_builder.base_agent_executor.BaseAgentExecutor._save_to_memory" ): result = crew.kickoff() @@ -163,7 +163,7 @@ def test_crew_memory_with_google_vertex_project_id(simple_agent, simple_task) -> assert crew._memory is memory with patch( - "crewai.agents.agent_builder.base_agent_executor_mixin.CrewAgentExecutorMixin._save_to_memory" + "crewai.agents.agent_builder.base_agent_executor.BaseAgentExecutor._save_to_memory" ): result = crew.kickoff() diff --git a/lib/crewai/tests/test_crew.py b/lib/crewai/tests/test_crew.py index f941a7965..9621a1f0d 100644 --- a/lib/crewai/tests/test_crew.py +++ b/lib/crewai/tests/test_crew.py @@ -2141,6 +2141,7 @@ def test_task_same_callback_both_on_task_and_crew(): @pytest.mark.vcr() def test_tools_with_custom_caching(): + @tool def multiplcation_tool(first_number: int, second_number: int) -> int: """Useful for when you need to multiply two numbers together.""" diff --git a/lib/crewai/tests/test_event_record.py b/lib/crewai/tests/test_event_record.py new file mode 100644 index 000000000..d0be4ec76 --- /dev/null +++ b/lib/crewai/tests/test_event_record.py @@ -0,0 +1,423 @@ +"""Tests for EventRecord data structure and RuntimeState integration.""" + +from __future__ import annotations + +import json + +import pytest + +from crewai.events.base_events import BaseEvent +from crewai.state.event_record import EventRecord, EventNode + + +# ── Helpers ────────────────────────────────────────────────────────── + + +def _event(type: str, **kwargs) -> BaseEvent: + return BaseEvent(type=type, **kwargs) + + +def _linear_record(n: int = 5) -> tuple[EventRecord, list[BaseEvent]]: + """Build a simple chain: e0 → e1 → e2 → ... 
with previous_event_id.""" + g = EventRecord() + events: list[BaseEvent] = [] + for i in range(n): + e = _event( + f"step_{i}", + previous_event_id=events[-1].event_id if events else None, + emission_sequence=i + 1, + ) + events.append(e) + g.add(e) + return g, events + + +def _tree_record() -> tuple[EventRecord, dict[str, BaseEvent]]: + """Build a parent/child tree: + + crew_start + ├── task_start + │ ├── agent_start + │ └── agent_complete (started=agent_start) + └── task_complete (started=task_start) + """ + g = EventRecord() + crew_start = _event("crew_kickoff_started", emission_sequence=1) + task_start = _event( + "task_started", + parent_event_id=crew_start.event_id, + previous_event_id=crew_start.event_id, + emission_sequence=2, + ) + agent_start = _event( + "agent_execution_started", + parent_event_id=task_start.event_id, + previous_event_id=task_start.event_id, + emission_sequence=3, + ) + agent_complete = _event( + "agent_execution_completed", + parent_event_id=task_start.event_id, + previous_event_id=agent_start.event_id, + started_event_id=agent_start.event_id, + emission_sequence=4, + ) + task_complete = _event( + "task_completed", + parent_event_id=crew_start.event_id, + previous_event_id=agent_complete.event_id, + started_event_id=task_start.event_id, + emission_sequence=5, + ) + + for e in [crew_start, task_start, agent_start, agent_complete, task_complete]: + g.add(e) + + return g, { + "crew_start": crew_start, + "task_start": task_start, + "agent_start": agent_start, + "agent_complete": agent_complete, + "task_complete": task_complete, + } + + +# ── EventNode tests ───────────────────────────────────────────────── + + +class TestEventNode: + def test_add_edge(self): + node = EventNode(event=_event("test")) + node.add_edge("child", "abc") + assert node.neighbors("child") == ["abc"] + + def test_neighbors_empty(self): + node = EventNode(event=_event("test")) + assert node.neighbors("parent") == [] + + def test_multiple_edges_same_type(self): + node = 
EventNode(event=_event("test")) + node.add_edge("child", "a") + node.add_edge("child", "b") + assert node.neighbors("child") == ["a", "b"] + + +# ── EventRecord core tests ─────────────────────────────────────────── + + +class TestEventRecordCore: + def test_add_single_event(self): + g = EventRecord() + e = _event("test") + node = g.add(e) + assert len(g) == 1 + assert e.event_id in g + assert node.event.type == "test" + + def test_get_existing(self): + g = EventRecord() + e = _event("test") + g.add(e) + assert g.get(e.event_id) is not None + + def test_get_missing(self): + g = EventRecord() + assert g.get("nonexistent") is None + + def test_contains(self): + g = EventRecord() + e = _event("test") + g.add(e) + assert e.event_id in g + assert "missing" not in g + + +# ── Edge wiring tests ─────────────────────────────────────────────── + + +class TestEdgeWiring: + def test_parent_child_bidirectional(self): + g = EventRecord() + parent = _event("parent") + child = _event("child", parent_event_id=parent.event_id) + g.add(parent) + g.add(child) + + parent_node = g.get(parent.event_id) + child_node = g.get(child.event_id) + assert child.event_id in parent_node.neighbors("child") + assert parent.event_id in child_node.neighbors("parent") + + def test_previous_next_bidirectional(self): + g, events = _linear_record(3) + node0 = g.get(events[0].event_id) + node1 = g.get(events[1].event_id) + node2 = g.get(events[2].event_id) + + assert events[1].event_id in node0.neighbors("next") + assert events[0].event_id in node1.neighbors("previous") + assert events[2].event_id in node1.neighbors("next") + assert events[1].event_id in node2.neighbors("previous") + + def test_trigger_bidirectional(self): + g = EventRecord() + cause = _event("cause") + effect = _event("effect", triggered_by_event_id=cause.event_id) + g.add(cause) + g.add(effect) + + assert effect.event_id in g.get(cause.event_id).neighbors("trigger") + assert cause.event_id in 
g.get(effect.event_id).neighbors("triggered_by") + + def test_started_completed_by_bidirectional(self): + g = EventRecord() + start = _event("start") + end = _event("end", started_event_id=start.event_id) + g.add(start) + g.add(end) + + assert end.event_id in g.get(start.event_id).neighbors("completed_by") + assert start.event_id in g.get(end.event_id).neighbors("started") + + def test_dangling_reference_ignored(self): + """Edge to a non-existent node should not be wired.""" + g = EventRecord() + e = _event("orphan", parent_event_id="nonexistent") + g.add(e) + node = g.get(e.event_id) + assert node.neighbors("parent") == [] + + +# ── Edge symmetry validation ───────────────────────────────────────── + + +SYMMETRIC_PAIRS = [ + ("parent", "child"), + ("previous", "next"), + ("triggered_by", "trigger"), + ("started", "completed_by"), +] + + +class TestEdgeSymmetry: + @pytest.mark.parametrize("forward,reverse", SYMMETRIC_PAIRS) + def test_symmetry_on_tree(self, forward, reverse): + g, _ = _tree_record() + for node_id, node in g.nodes.items(): + for target_id in node.neighbors(forward): + target_node = g.get(target_id) + assert target_node is not None, f"{target_id} missing from record" + assert node_id in target_node.neighbors(reverse), ( + f"Asymmetric edge: {node_id} --{forward.value}--> {target_id} " + f"but {target_id} has no {reverse.value} back to {node_id}" + ) + + @pytest.mark.parametrize("forward,reverse", SYMMETRIC_PAIRS) + def test_symmetry_on_linear(self, forward, reverse): + g, _ = _linear_record(10) + for node_id, node in g.nodes.items(): + for target_id in node.neighbors(forward): + target_node = g.get(target_id) + assert target_node is not None + assert node_id in target_node.neighbors(reverse) + + +# ── Ordering tests ─────────────────────────────────────────────────── + + +class TestOrdering: + def test_emission_sequence_monotonic(self): + g, events = _linear_record(10) + sequences = [e.emission_sequence for e in events] + assert sequences == 
sorted(sequences) + assert len(set(sequences)) == len(sequences), "Duplicate sequences" + + def test_next_chain_follows_sequence_order(self): + g, events = _linear_record(5) + current = g.get(events[0].event_id) + visited = [] + while current: + visited.append(current.event.event_id) + nexts = current.neighbors("next") + current = g.get(nexts[0]) if nexts else None + assert visited == [e.event_id for e in events] + + +# ── Traversal tests ───────────────────────────────────────────────── + + +class TestTraversal: + def test_roots_single_root(self): + g, events = _tree_record() + roots = g.roots() + assert len(roots) == 1 + assert roots[0].event.type == "crew_kickoff_started" + + def test_roots_multiple(self): + g = EventRecord() + g.add(_event("root1")) + g.add(_event("root2")) + assert len(g.roots()) == 2 + + def test_descendants_of_crew_start(self): + g, events = _tree_record() + desc = g.descendants(events["crew_start"].event_id) + desc_types = {n.event.type for n in desc} + assert desc_types == { + "task_started", + "task_completed", + "agent_execution_started", + "agent_execution_completed", + } + + def test_descendants_of_leaf(self): + g, events = _tree_record() + desc = g.descendants(events["task_complete"].event_id) + assert desc == [] + + def test_descendants_does_not_include_self(self): + g, events = _tree_record() + desc = g.descendants(events["crew_start"].event_id) + desc_ids = {n.event.event_id for n in desc} + assert events["crew_start"].event_id not in desc_ids + + +# ── Serialization round-trip tests ────────────────────────────────── + + +class TestSerialization: + def test_empty_record_roundtrip(self): + g = EventRecord() + restored = EventRecord.model_validate_json(g.model_dump_json()) + assert len(restored) == 0 + + def test_linear_record_roundtrip(self): + g, events = _linear_record(5) + restored = EventRecord.model_validate_json(g.model_dump_json()) + assert len(restored) == 5 + for e in events: + assert e.event_id in restored + + def 
test_tree_record_roundtrip(self): + g, events = _tree_record() + restored = EventRecord.model_validate_json(g.model_dump_json()) + assert len(restored) == 5 + + # Verify edges survived + crew_node = restored.get(events["crew_start"].event_id) + assert len(crew_node.neighbors("child")) == 2 + + def test_roundtrip_preserves_edge_symmetry(self): + g, _ = _tree_record() + restored = EventRecord.model_validate_json(g.model_dump_json()) + for node_id, node in restored.nodes.items(): + for forward, reverse in SYMMETRIC_PAIRS: + for target_id in node.neighbors(forward): + target_node = restored.get(target_id) + assert node_id in target_node.neighbors(reverse) + + def test_roundtrip_preserves_event_data(self): + g = EventRecord() + e = _event( + "test", + source_type="crew", + task_id="t1", + agent_role="researcher", + emission_sequence=42, + ) + g.add(e) + restored = EventRecord.model_validate_json(g.model_dump_json()) + re = restored.get(e.event_id).event + assert re.type == "test" + assert re.source_type == "crew" + assert re.task_id == "t1" + assert re.agent_role == "researcher" + assert re.emission_sequence == 42 + + +# ── RuntimeState integration tests ────────────────────────────────── + + +class TestRuntimeStateIntegration: + def test_runtime_state_serializes_event_record(self): + from crewai import Agent, Crew, RuntimeState + + if RuntimeState is None: + pytest.skip("RuntimeState unavailable (model_rebuild failed)") + + agent = Agent( + role="test", goal="test", backstory="test", llm="gpt-4o-mini" + ) + crew = Crew(agents=[agent], tasks=[], verbose=False) + state = RuntimeState(root=[crew]) + + e1 = _event("crew_started", emission_sequence=1) + e2 = _event( + "task_started", + parent_event_id=e1.event_id, + emission_sequence=2, + ) + state.event_record.add(e1) + state.event_record.add(e2) + + dumped = json.loads(state.model_dump_json()) + assert "entities" in dumped + assert "event_record" in dumped + assert len(dumped["event_record"]["nodes"]) == 2 + + def 
test_runtime_state_roundtrip_with_record(self): + from crewai import Agent, Crew, RuntimeState + + if RuntimeState is None: + pytest.skip("RuntimeState unavailable (model_rebuild failed)") + + agent = Agent( + role="test", goal="test", backstory="test", llm="gpt-4o-mini" + ) + crew = Crew(agents=[agent], tasks=[], verbose=False) + state = RuntimeState(root=[crew]) + + e1 = _event("crew_started", emission_sequence=1) + e2 = _event( + "task_started", + parent_event_id=e1.event_id, + emission_sequence=2, + ) + state.event_record.add(e1) + state.event_record.add(e2) + + raw = state.model_dump_json() + restored = RuntimeState.model_validate_json( + raw, context={"from_checkpoint": True} + ) + + assert len(restored.event_record) == 2 + assert e1.event_id in restored.event_record + assert e2.event_id in restored.event_record + + # Verify edges survived + e2_node = restored.event_record.get(e2.event_id) + assert e1.event_id in e2_node.neighbors("parent") + + def test_runtime_state_without_record_still_loads(self): + """Backwards compat: a bare entity list should still validate.""" + from crewai import Agent, Crew, RuntimeState + + if RuntimeState is None: + pytest.skip("RuntimeState unavailable (model_rebuild failed)") + + agent = Agent( + role="test", goal="test", backstory="test", llm="gpt-4o-mini" + ) + crew = Crew(agents=[agent], tasks=[], verbose=False) + state = RuntimeState(root=[crew]) + + # Simulate old-format JSON (just the entity list) + old_json = json.dumps( + [json.loads(crew.model_dump_json())] + ) + restored = RuntimeState.model_validate_json( + old_json, context={"from_checkpoint": True} + ) + assert len(restored.root) == 1 + assert len(restored.event_record) == 0 \ No newline at end of file diff --git a/uv.lock b/uv.lock index 13bde6745..66b886731 100644 --- a/uv.lock +++ b/uv.lock @@ -13,7 +13,7 @@ resolution-markers = [ ] [options] -exclude-newer = "2026-04-03T15:34:41.894676632Z" +exclude-newer = "2026-04-03T16:45:28.209407Z" exclude-newer-span = "P3D" 
[manifest] @@ -932,7 +932,7 @@ name = "coloredlogs" version = "15.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "humanfriendly" }, + { name = "humanfriendly", marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/cc/c7/eed8f27100517e8c0e6b923d5f0845d0cb99763da6fdee00478f91db7325/coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0", size = 278520, upload-time = "2021-06-11T10:22:45.202Z" } wheels = [ @@ -1199,6 +1199,7 @@ wheels = [ name = "crewai" source = { editable = "lib/crewai" } dependencies = [ + { name = "aiofiles" }, { name = "aiosqlite" }, { name = "appdirs" }, { name = "chromadb" }, @@ -1295,6 +1296,7 @@ requires-dist = [ { name = "a2a-sdk", marker = "extra == 'a2a'", specifier = "~=0.3.10" }, { name = "aiobotocore", marker = "extra == 'aws'", specifier = "~=2.25.2" }, { name = "aiocache", extras = ["memcached", "redis"], marker = "extra == 'a2a'", specifier = "~=0.12.3" }, + { name = "aiofiles", specifier = "~=24.1.0" }, { name = "aiosqlite", specifier = "~=0.21.0" }, { name = "anthropic", marker = "extra == 'anthropic'", specifier = "~=0.73.0" }, { name = "appdirs", specifier = "~=1.4.4" }, @@ -2046,7 +2048,7 @@ name = "exceptiongroup" version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ @@ -2771,7 +2773,7 @@ name = "humanfriendly" version = "10.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = 
"pyreadline3", marker = "sys_platform == 'win32'" }, + { name = "pyreadline3", marker = "python_full_version < '3.11' and sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/cc/3f/2c29224acb2e2df4d2046e4c73ee2662023c58ff5b113c4c1adac0886c43/humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc", size = 360702, upload-time = "2021-09-17T21:40:43.31Z" } wheels = [ @@ -4843,13 +4845,12 @@ name = "onnxruntime" version = "1.23.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "coloredlogs" }, - { name = "flatbuffers" }, + { name = "coloredlogs", marker = "python_full_version < '3.11'" }, + { name = "flatbuffers", marker = "python_full_version < '3.11'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "numpy", version = "2.4.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "packaging" }, - { name = "protobuf" }, - { name = "sympy" }, + { name = "packaging", marker = "python_full_version < '3.11'" }, + { name = "protobuf", marker = "python_full_version < '3.11'" }, + { name = "sympy", marker = "python_full_version < '3.11'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/35/d6/311b1afea060015b56c742f3531168c1644650767f27ef40062569960587/onnxruntime-1.23.2-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:a7730122afe186a784660f6ec5807138bf9d792fa1df76556b27307ea9ebcbe3", size = 17195934, upload-time = "2025-10-27T23:06:14.143Z" }, From c4e2d7ea3b640c277b0dbd31f1a2cd83f1f2d0a9 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Tue, 7 Apr 2026 05:34:25 +0800 Subject: [PATCH 02/21] feat: add CheckpointConfig for automatic checkpointing --- docs/ar/concepts/checkpointing.mdx | 187 ++ docs/docs.json | 1582 +++++++++-------- docs/en/concepts/checkpointing.mdx | 187 ++ 
docs/ko/concepts/checkpointing.mdx | 187 ++ docs/pt-BR/concepts/checkpointing.mdx | 187 ++ lib/crewai/src/crewai/__init__.py | 2 + .../crewai/agents/agent_builder/base_agent.py | 6 + lib/crewai/src/crewai/crew.py | 6 + lib/crewai/src/crewai/flow/flow.py | 2 + lib/crewai/src/crewai/state/__init__.py | 4 + .../src/crewai/state/checkpoint_config.py | 193 ++ .../src/crewai/state/checkpoint_listener.py | 176 ++ lib/crewai/tests/test_checkpoint.py | 169 ++ 13 files changed, 2113 insertions(+), 775 deletions(-) create mode 100644 docs/ar/concepts/checkpointing.mdx create mode 100644 docs/en/concepts/checkpointing.mdx create mode 100644 docs/ko/concepts/checkpointing.mdx create mode 100644 docs/pt-BR/concepts/checkpointing.mdx create mode 100644 lib/crewai/src/crewai/state/checkpoint_config.py create mode 100644 lib/crewai/src/crewai/state/checkpoint_listener.py create mode 100644 lib/crewai/tests/test_checkpoint.py diff --git a/docs/ar/concepts/checkpointing.mdx b/docs/ar/concepts/checkpointing.mdx new file mode 100644 index 000000000..442a98bea --- /dev/null +++ b/docs/ar/concepts/checkpointing.mdx @@ -0,0 +1,187 @@ +--- +title: Checkpointing +description: حفظ حالة التنفيذ تلقائيا حتى تتمكن الطواقم والتدفقات والوكلاء من الاستئناف بعد الفشل. +icon: floppy-disk +mode: "wide" +--- + + +الـ Checkpointing في اصدار مبكر. قد تتغير واجهات البرمجة في الاصدارات المستقبلية. + + +## نظرة عامة + +يقوم الـ Checkpointing بحفظ حالة التنفيذ تلقائيا اثناء التشغيل. اذا فشل طاقم او تدفق او وكيل اثناء التنفيذ، يمكنك الاستعادة من اخر نقطة حفظ والاستئناف دون اعادة تنفيذ العمل المكتمل. + +## البداية السريعة + +```python +from crewai import Crew, CheckpointConfig + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=True, # يستخدم الافتراضيات: ./.checkpoints, عند task_completed +) +result = crew.kickoff() +``` + +تتم كتابة ملفات نقاط الحفظ في `./.checkpoints/` بعد اكتمال كل مهمة. 
+ +## التكوين + +استخدم `CheckpointConfig` للتحكم الكامل: + +```python +from crewai import Crew, CheckpointConfig + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./my_checkpoints", + on_events=["task_completed", "crew_kickoff_completed"], + max_checkpoints=5, + ), +) +``` + +### حقول CheckpointConfig + +| الحقل | النوع | الافتراضي | الوصف | +|:------|:------|:----------|:------| +| `directory` | `str` | `"./.checkpoints"` | مسار ملفات نقاط الحفظ | +| `on_events` | `list[str]` | `["task_completed"]` | انواع الاحداث التي تطلق نقطة حفظ | +| `provider` | `BaseProvider` | `JsonProvider()` | واجهة التخزين | +| `max_checkpoints` | `int \| None` | `None` | الحد الاقصى للملفات؛ يتم حذف الاقدم اولا | + +### الوراثة والانسحاب + +يقبل حقل `checkpoint` في Crew و Flow و Agent قيم `CheckpointConfig` او `True` او `False` او `None`: + +| القيمة | السلوك | +|:-------|:-------| +| `None` (افتراضي) | يرث من الاصل. الوكيل يرث اعدادات الطاقم. | +| `True` | تفعيل بالاعدادات الافتراضية. | +| `False` | انسحاب صريح. يوقف الوراثة من الاصل. | +| `CheckpointConfig(...)` | اعدادات مخصصة. | + +```python +crew = Crew( + agents=[ + Agent(role="Researcher", ...), # يرث checkpoint من الطاقم + Agent(role="Writer", ..., checkpoint=False), # منسحب، بدون نقاط حفظ + ], + tasks=[...], + checkpoint=True, +) +``` + +## الاستئناف من نقطة حفظ + +```python +# استعادة واستئناف +crew = Crew.from_checkpoint("./my_checkpoints/20260407T120000_abc123.json") +result = crew.kickoff() # يستأنف من اخر مهمة مكتملة +``` + +يتخطى الطاقم المستعاد المهام المكتملة ويستأنف من اول مهمة غير مكتملة. + +## يعمل على Crew و Flow و Agent + +### Crew + +```python +crew = Crew( + agents=[researcher, writer], + tasks=[research_task, write_task, review_task], + checkpoint=CheckpointConfig(directory="./crew_cp"), +) +``` + +المشغل الافتراضي: `task_completed` (نقطة حفظ واحدة لكل مهمة مكتملة). 
+ +### Flow + +```python +from crewai.flow.flow import Flow, start, listen +from crewai import CheckpointConfig + +class MyFlow(Flow): + @start() + def step_one(self): + return "data" + + @listen(step_one) + def step_two(self, data): + return process(data) + +flow = MyFlow( + checkpoint=CheckpointConfig( + directory="./flow_cp", + on_events=["method_execution_finished"], + ), +) +result = flow.kickoff() + +# استئناف +flow = MyFlow.from_checkpoint("./flow_cp/20260407T120000_abc123.json") +result = flow.kickoff() +``` + +### Agent + +```python +agent = Agent( + role="Researcher", + goal="Research topics", + backstory="Expert researcher", + checkpoint=CheckpointConfig( + directory="./agent_cp", + on_events=["lite_agent_execution_completed"], + ), +) +result = agent.kickoff(messages=[{"role": "user", "content": "Research AI trends"}]) +``` + +## انواع الاحداث + +يقبل حقل `on_events` اي مجموعة من سلاسل انواع الاحداث. الخيارات الشائعة: + +| حالة الاستخدام | الاحداث | +|:---------------|:--------| +| بعد كل مهمة (Crew) | `["task_completed"]` | +| بعد كل طريقة في التدفق | `["method_execution_finished"]` | +| بعد تنفيذ الوكيل | `["agent_execution_completed"]`, `["lite_agent_execution_completed"]` | +| عند اكتمال الطاقم فقط | `["crew_kickoff_completed"]` | +| بعد كل استدعاء LLM | `["llm_call_completed"]` | +| على كل شيء | `["*"]` | + + +استخدام `["*"]` او احداث عالية التردد مثل `llm_call_completed` سيكتب العديد من ملفات نقاط الحفظ وقد يؤثر على الاداء. استخدم `max_checkpoints` للحد من استخدام المساحة. 
+ + +## نقاط الحفظ اليدوية + +للتحكم الكامل، سجل معالج الاحداث الخاص بك واستدع `state.checkpoint()` مباشرة: + +```python +from crewai.events.event_bus import crewai_event_bus +from crewai.events.types.llm_events import LLMCallCompletedEvent + +# معالج متزامن +@crewai_event_bus.on(LLMCallCompletedEvent) +def on_llm_done(source, event, state): + path = state.checkpoint("./my_checkpoints") + print(f"تم حفظ نقطة الحفظ: {path}") + +# معالج غير متزامن +@crewai_event_bus.on(LLMCallCompletedEvent) +async def on_llm_done_async(source, event, state): + path = await state.acheckpoint("./my_checkpoints") + print(f"تم حفظ نقطة الحفظ: {path}") +``` + +وسيط `state` هو `RuntimeState` الذي يتم تمريره تلقائيا بواسطة ناقل الاحداث عندما يقبل المعالج 3 معاملات. يمكنك تسجيل معالجات على اي نوع حدث مدرج في وثائق [Event Listeners](/ar/concepts/event-listener). + +الـ Checkpointing يعمل بافضل جهد: اذا فشلت كتابة نقطة حفظ، يتم تسجيل الخطأ ولكن التنفيذ يستمر دون انقطاع. diff --git a/docs/docs.json b/docs/docs.json index 68ee0e7af..2fea532ef 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -168,7 +168,8 @@ "en/concepts/testing", "en/concepts/cli", "en/concepts/tools", - "en/concepts/event-listener" + "en/concepts/event-listener", + "en/concepts/checkpointing" ] }, { @@ -639,7 +640,8 @@ "en/concepts/testing", "en/concepts/cli", "en/concepts/tools", - "en/concepts/event-listener" + "en/concepts/event-listener", + "en/concepts/checkpointing" ] }, { @@ -1109,7 +1111,8 @@ "en/concepts/testing", "en/concepts/cli", "en/concepts/tools", - "en/concepts/event-listener" + "en/concepts/event-listener", + "en/concepts/checkpointing" ] }, { @@ -1578,7 +1581,8 @@ "en/concepts/testing", "en/concepts/cli", "en/concepts/tools", - "en/concepts/event-listener" + "en/concepts/event-listener", + "en/concepts/checkpointing" ] }, { @@ -2047,7 +2051,8 @@ "en/concepts/testing", "en/concepts/cli", "en/concepts/tools", - "en/concepts/event-listener" + "en/concepts/event-listener", + "en/concepts/checkpointing" ] }, { 
@@ -2516,7 +2521,8 @@ "en/concepts/testing", "en/concepts/cli", "en/concepts/tools", - "en/concepts/event-listener" + "en/concepts/event-listener", + "en/concepts/checkpointing" ] }, { @@ -2987,7 +2993,8 @@ "en/concepts/testing", "en/concepts/cli", "en/concepts/tools", - "en/concepts/event-listener" + "en/concepts/event-listener", + "en/concepts/checkpointing" ] }, { @@ -3457,7 +3464,8 @@ "en/concepts/testing", "en/concepts/cli", "en/concepts/tools", - "en/concepts/event-listener" + "en/concepts/event-listener", + "en/concepts/checkpointing" ] }, { @@ -3830,7 +3838,7 @@ "icon": "globe" }, { - "anchor": "Fórum", + "anchor": "F\u00f3rum", "href": "https://community.crewai.com", "icon": "discourse" }, @@ -3852,7 +3860,7 @@ "default": true, "tabs": [ { - "tab": "Início", + "tab": "In\u00edcio", "icon": "house", "groups": [ { @@ -3864,11 +3872,11 @@ ] }, { - "tab": "Documentação", + "tab": "Documenta\u00e7\u00e3o", "icon": "book-open", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -3879,7 +3887,7 @@ "group": "Guias", "pages": [ { - "group": "Estratégia", + "group": "Estrat\u00e9gia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -3915,14 +3923,14 @@ ] }, { - "group": "Ferramentas de Codificação", + "group": "Ferramentas de Codifica\u00e7\u00e3o", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avançado", + "group": "Avan\u00e7ado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -3930,7 +3938,7 @@ ] }, { - "group": "Migração", + "group": "Migra\u00e7\u00e3o", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -3960,11 +3968,12 @@ "pt-BR/concepts/testing", "pt-BR/concepts/cli", "pt-BR/concepts/tools", - "pt-BR/concepts/event-listener" + "pt-BR/concepts/event-listener", + "pt-BR/concepts/checkpointing" ] }, { - "group": "Integração MCP", + "group": 
"Integra\u00e7\u00e3o MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -3998,7 +4007,7 @@ ] }, { - "group": "Web Scraping & Navegação", + "group": "Web Scraping & Navega\u00e7\u00e3o", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -4079,7 +4088,7 @@ ] }, { - "group": "Automação", + "group": "Automa\u00e7\u00e3o", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -4154,7 +4163,7 @@ "icon": "briefcase", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -4186,7 +4195,7 @@ ] }, { - "group": "Documentação de Integração", + "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -4262,11 +4271,11 @@ ] }, { - "tab": "Referência da API", + "tab": "Refer\u00eancia da API", "icon": "magnifying-glass", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -4291,11 +4300,11 @@ ] }, { - "tab": "Notas de Versão", + "tab": "Notas de Vers\u00e3o", "icon": "clock", "groups": [ { - "group": "Notas de Versão", + "group": "Notas de Vers\u00e3o", "pages": [ "pt-BR/changelog" ] @@ -4308,7 +4317,7 @@ "version": "v1.12.2", "tabs": [ { - "tab": "Início", + "tab": "In\u00edcio", "icon": "house", "groups": [ { @@ -4320,11 +4329,11 @@ ] }, { - "tab": "Documentação", + "tab": "Documenta\u00e7\u00e3o", "icon": "book-open", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -4335,7 +4344,7 @@ "group": "Guias", "pages": [ { - "group": "Estratégia", + "group": "Estrat\u00e9gia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -4371,14 +4380,14 @@ ] }, { - "group": "Ferramentas de Codificação", + "group": "Ferramentas de Codifica\u00e7\u00e3o", "icon": "terminal", "pages": [ 
"pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avançado", + "group": "Avan\u00e7ado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -4386,7 +4395,7 @@ ] }, { - "group": "Migração", + "group": "Migra\u00e7\u00e3o", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -4416,11 +4425,12 @@ "pt-BR/concepts/testing", "pt-BR/concepts/cli", "pt-BR/concepts/tools", - "pt-BR/concepts/event-listener" + "pt-BR/concepts/event-listener", + "pt-BR/concepts/checkpointing" ] }, { - "group": "Integração MCP", + "group": "Integra\u00e7\u00e3o MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -4454,7 +4464,7 @@ ] }, { - "group": "Web Scraping & Navegação", + "group": "Web Scraping & Navega\u00e7\u00e3o", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -4535,7 +4545,7 @@ ] }, { - "group": "Automação", + "group": "Automa\u00e7\u00e3o", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -4610,7 +4620,7 @@ "icon": "briefcase", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -4642,7 +4652,7 @@ ] }, { - "group": "Documentação de Integração", + "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -4718,11 +4728,11 @@ ] }, { - "tab": "Referência da API", + "tab": "Refer\u00eancia da API", "icon": "magnifying-glass", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -4747,11 +4757,11 @@ ] }, { - "tab": "Notas de Versão", + "tab": "Notas de Vers\u00e3o", "icon": "clock", "groups": [ { - "group": "Notas de Versão", + "group": "Notas de Vers\u00e3o", "pages": [ "pt-BR/changelog" ] @@ -4764,7 +4774,7 @@ "version": "v1.12.1", "tabs": [ { - "tab": "Início", + "tab": "In\u00edcio", "icon": "house", "groups": [ { @@ 
-4776,11 +4786,11 @@ ] }, { - "tab": "Documentação", + "tab": "Documenta\u00e7\u00e3o", "icon": "book-open", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -4791,7 +4801,7 @@ "group": "Guias", "pages": [ { - "group": "Estratégia", + "group": "Estrat\u00e9gia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -4827,14 +4837,14 @@ ] }, { - "group": "Ferramentas de Codificação", + "group": "Ferramentas de Codifica\u00e7\u00e3o", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avançado", + "group": "Avan\u00e7ado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -4842,7 +4852,7 @@ ] }, { - "group": "Migração", + "group": "Migra\u00e7\u00e3o", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -4871,11 +4881,12 @@ "pt-BR/concepts/testing", "pt-BR/concepts/cli", "pt-BR/concepts/tools", - "pt-BR/concepts/event-listener" + "pt-BR/concepts/event-listener", + "pt-BR/concepts/checkpointing" ] }, { - "group": "Integração MCP", + "group": "Integra\u00e7\u00e3o MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -4909,7 +4920,7 @@ ] }, { - "group": "Web Scraping & Navegação", + "group": "Web Scraping & Navega\u00e7\u00e3o", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -4990,7 +5001,7 @@ ] }, { - "group": "Automação", + "group": "Automa\u00e7\u00e3o", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -5065,7 +5076,7 @@ "icon": "briefcase", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -5097,7 +5108,7 @@ ] }, { - "group": "Documentação de Integração", + "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -5173,11 +5184,11 @@ ] }, { - "tab": "Referência da 
API", + "tab": "Refer\u00eancia da API", "icon": "magnifying-glass", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -5202,11 +5213,11 @@ ] }, { - "tab": "Notas de Versão", + "tab": "Notas de Vers\u00e3o", "icon": "clock", "groups": [ { - "group": "Notas de Versão", + "group": "Notas de Vers\u00e3o", "pages": [ "pt-BR/changelog" ] @@ -5219,7 +5230,7 @@ "version": "v1.12.0", "tabs": [ { - "tab": "Início", + "tab": "In\u00edcio", "icon": "house", "groups": [ { @@ -5231,11 +5242,11 @@ ] }, { - "tab": "Documentação", + "tab": "Documenta\u00e7\u00e3o", "icon": "book-open", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -5246,7 +5257,7 @@ "group": "Guias", "pages": [ { - "group": "Estratégia", + "group": "Estrat\u00e9gia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -5282,14 +5293,14 @@ ] }, { - "group": "Ferramentas de Codificação", + "group": "Ferramentas de Codifica\u00e7\u00e3o", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avançado", + "group": "Avan\u00e7ado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -5297,7 +5308,7 @@ ] }, { - "group": "Migração", + "group": "Migra\u00e7\u00e3o", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -5326,11 +5337,12 @@ "pt-BR/concepts/testing", "pt-BR/concepts/cli", "pt-BR/concepts/tools", - "pt-BR/concepts/event-listener" + "pt-BR/concepts/event-listener", + "pt-BR/concepts/checkpointing" ] }, { - "group": "Integração MCP", + "group": "Integra\u00e7\u00e3o MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -5364,7 +5376,7 @@ ] }, { - "group": "Web Scraping & Navegação", + "group": "Web Scraping & Navega\u00e7\u00e3o", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -5445,7 
+5457,7 @@ ] }, { - "group": "Automação", + "group": "Automa\u00e7\u00e3o", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -5520,7 +5532,7 @@ "icon": "briefcase", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -5552,7 +5564,7 @@ ] }, { - "group": "Documentação de Integração", + "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -5628,11 +5640,11 @@ ] }, { - "tab": "Referência da API", + "tab": "Refer\u00eancia da API", "icon": "magnifying-glass", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -5657,11 +5669,11 @@ ] }, { - "tab": "Notas de Versão", + "tab": "Notas de Vers\u00e3o", "icon": "clock", "groups": [ { - "group": "Notas de Versão", + "group": "Notas de Vers\u00e3o", "pages": [ "pt-BR/changelog" ] @@ -5674,7 +5686,7 @@ "version": "v1.11.1", "tabs": [ { - "tab": "Início", + "tab": "In\u00edcio", "icon": "house", "groups": [ { @@ -5686,11 +5698,11 @@ ] }, { - "tab": "Documentação", + "tab": "Documenta\u00e7\u00e3o", "icon": "book-open", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -5701,7 +5713,7 @@ "group": "Guias", "pages": [ { - "group": "Estratégia", + "group": "Estrat\u00e9gia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -5737,14 +5749,14 @@ ] }, { - "group": "Ferramentas de Codificação", + "group": "Ferramentas de Codifica\u00e7\u00e3o", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avançado", + "group": "Avan\u00e7ado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -5752,7 +5764,7 @@ ] }, { - "group": "Migração", + "group": "Migra\u00e7\u00e3o", "icon": "shuffle", "pages": [ 
"pt-BR/guides/migration/migrating-from-langgraph" @@ -5781,11 +5793,12 @@ "pt-BR/concepts/testing", "pt-BR/concepts/cli", "pt-BR/concepts/tools", - "pt-BR/concepts/event-listener" + "pt-BR/concepts/event-listener", + "pt-BR/concepts/checkpointing" ] }, { - "group": "Integração MCP", + "group": "Integra\u00e7\u00e3o MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -5819,7 +5832,7 @@ ] }, { - "group": "Web Scraping & Navegação", + "group": "Web Scraping & Navega\u00e7\u00e3o", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -5900,7 +5913,7 @@ ] }, { - "group": "Automação", + "group": "Automa\u00e7\u00e3o", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -5975,7 +5988,7 @@ "icon": "briefcase", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -6007,7 +6020,7 @@ ] }, { - "group": "Documentação de Integração", + "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -6083,11 +6096,11 @@ ] }, { - "tab": "Referência da API", + "tab": "Refer\u00eancia da API", "icon": "magnifying-glass", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -6112,11 +6125,11 @@ ] }, { - "tab": "Notas de Versão", + "tab": "Notas de Vers\u00e3o", "icon": "clock", "groups": [ { - "group": "Notas de Versão", + "group": "Notas de Vers\u00e3o", "pages": [ "pt-BR/changelog" ] @@ -6129,7 +6142,7 @@ "version": "v1.11.0", "tabs": [ { - "tab": "Início", + "tab": "In\u00edcio", "icon": "house", "groups": [ { @@ -6141,11 +6154,11 @@ ] }, { - "tab": "Documentação", + "tab": "Documenta\u00e7\u00e3o", "icon": "book-open", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -6156,7 +6169,7 @@ "group": "Guias", "pages": [ { - 
"group": "Estratégia", + "group": "Estrat\u00e9gia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -6192,14 +6205,14 @@ ] }, { - "group": "Ferramentas de Codificação", + "group": "Ferramentas de Codifica\u00e7\u00e3o", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avançado", + "group": "Avan\u00e7ado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -6207,7 +6220,7 @@ ] }, { - "group": "Migração", + "group": "Migra\u00e7\u00e3o", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -6235,11 +6248,12 @@ "pt-BR/concepts/testing", "pt-BR/concepts/cli", "pt-BR/concepts/tools", - "pt-BR/concepts/event-listener" + "pt-BR/concepts/event-listener", + "pt-BR/concepts/checkpointing" ] }, { - "group": "Integração MCP", + "group": "Integra\u00e7\u00e3o MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -6273,7 +6287,7 @@ ] }, { - "group": "Web Scraping & Navegação", + "group": "Web Scraping & Navega\u00e7\u00e3o", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -6354,7 +6368,7 @@ ] }, { - "group": "Automação", + "group": "Automa\u00e7\u00e3o", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -6429,7 +6443,7 @@ "icon": "briefcase", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -6461,7 +6475,7 @@ ] }, { - "group": "Documentação de Integração", + "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -6537,11 +6551,11 @@ ] }, { - "tab": "Referência da API", + "tab": "Refer\u00eancia da API", "icon": "magnifying-glass", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -6566,11 +6580,11 @@ ] }, { - "tab": "Notas de Versão", + "tab": "Notas de 
Vers\u00e3o", "icon": "clock", "groups": [ { - "group": "Notas de Versão", + "group": "Notas de Vers\u00e3o", "pages": [ "pt-BR/changelog" ] @@ -6583,7 +6597,7 @@ "version": "v1.10.1", "tabs": [ { - "tab": "Início", + "tab": "In\u00edcio", "icon": "house", "groups": [ { @@ -6595,11 +6609,11 @@ ] }, { - "tab": "Documentação", + "tab": "Documenta\u00e7\u00e3o", "icon": "book-open", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -6610,7 +6624,7 @@ "group": "Guias", "pages": [ { - "group": "Estratégia", + "group": "Estrat\u00e9gia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -6646,14 +6660,14 @@ ] }, { - "group": "Ferramentas de Codificação", + "group": "Ferramentas de Codifica\u00e7\u00e3o", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avançado", + "group": "Avan\u00e7ado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -6661,7 +6675,7 @@ ] }, { - "group": "Migração", + "group": "Migra\u00e7\u00e3o", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -6689,11 +6703,12 @@ "pt-BR/concepts/testing", "pt-BR/concepts/cli", "pt-BR/concepts/tools", - "pt-BR/concepts/event-listener" + "pt-BR/concepts/event-listener", + "pt-BR/concepts/checkpointing" ] }, { - "group": "Integração MCP", + "group": "Integra\u00e7\u00e3o MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -6727,7 +6742,7 @@ ] }, { - "group": "Web Scraping & Navegação", + "group": "Web Scraping & Navega\u00e7\u00e3o", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -6808,7 +6823,7 @@ ] }, { - "group": "Automação", + "group": "Automa\u00e7\u00e3o", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -6883,7 +6898,7 @@ "icon": "briefcase", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/enterprise/introduction" ] @@ 
-6915,7 +6930,7 @@ ] }, { - "group": "Documentação de Integração", + "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -6991,11 +7006,11 @@ ] }, { - "tab": "Referência da API", + "tab": "Refer\u00eancia da API", "icon": "magnifying-glass", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -7020,11 +7035,11 @@ ] }, { - "tab": "Notas de Versão", + "tab": "Notas de Vers\u00e3o", "icon": "clock", "groups": [ { - "group": "Notas de Versão", + "group": "Notas de Vers\u00e3o", "pages": [ "pt-BR/changelog" ] @@ -7037,7 +7052,7 @@ "version": "v1.10.0", "tabs": [ { - "tab": "Início", + "tab": "In\u00edcio", "icon": "house", "groups": [ { @@ -7049,11 +7064,11 @@ ] }, { - "tab": "Documentação", + "tab": "Documenta\u00e7\u00e3o", "icon": "book-open", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -7064,7 +7079,7 @@ "group": "Guias", "pages": [ { - "group": "Estratégia", + "group": "Estrat\u00e9gia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -7100,14 +7115,14 @@ ] }, { - "group": "Ferramentas de Codificação", + "group": "Ferramentas de Codifica\u00e7\u00e3o", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avançado", + "group": "Avan\u00e7ado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -7115,7 +7130,7 @@ ] }, { - "group": "Migração", + "group": "Migra\u00e7\u00e3o", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -7144,11 +7159,12 @@ "pt-BR/concepts/testing", "pt-BR/concepts/cli", "pt-BR/concepts/tools", - "pt-BR/concepts/event-listener" + "pt-BR/concepts/event-listener", + "pt-BR/concepts/checkpointing" ] }, { - "group": "Integração MCP", + "group": 
"Integra\u00e7\u00e3o MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -7182,7 +7198,7 @@ ] }, { - "group": "Web Scraping & Navegação", + "group": "Web Scraping & Navega\u00e7\u00e3o", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -7263,7 +7279,7 @@ ] }, { - "group": "Automação", + "group": "Automa\u00e7\u00e3o", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -7338,7 +7354,7 @@ "icon": "briefcase", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -7370,7 +7386,7 @@ ] }, { - "group": "Documentação de Integração", + "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -7446,11 +7462,11 @@ ] }, { - "tab": "Referência da API", + "tab": "Refer\u00eancia da API", "icon": "magnifying-glass", "groups": [ { - "group": "Começando", + "group": "Come\u00e7ando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -7475,11 +7491,11 @@ ] }, { - "tab": "Notas de Versão", + "tab": "Notas de Vers\u00e3o", "icon": "clock", "groups": [ { - "group": "Notas de Versão", + "group": "Notas de Vers\u00e3o", "pages": [ "pt-BR/changelog" ] @@ -7495,17 +7511,17 @@ "global": { "anchors": [ { - "anchor": "웹사이트", + "anchor": "\uc6f9\uc0ac\uc774\ud2b8", "href": "https://crewai.com", "icon": "globe" }, { - "anchor": "포럼", + "anchor": "\ud3ec\ub7fc", "href": "https://community.crewai.com", "icon": "discourse" }, { - "anchor": "블로그", + "anchor": "\ube14\ub85c\uadf8", "href": "https://blog.crewai.com", "icon": "newspaper" }, @@ -7522,11 +7538,11 @@ "default": true, "tabs": [ { - "tab": "홈", + "tab": "\ud648", "icon": "house", "groups": [ { - "group": "환영합니다", + "group": "\ud658\uc601\ud569\ub2c8\ub2e4", "pages": [ "ko/index" ] @@ -7534,11 +7550,11 @@ ] }, { - "tab": "기술 문서", + "tab": "\uae30\uc220 \ubb38\uc11c", "icon": "book-open", "groups": [ { - 
"group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/introduction", "ko/installation", @@ -7546,31 +7562,31 @@ ] }, { - "group": "가이드", + "group": "\uac00\uc774\ub4dc", "pages": [ { - "group": "전략", + "group": "\uc804\ub7b5", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "에이전트 (Agents)", + "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "크루 (Crews)", + "group": "\ud06c\ub8e8 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "플로우 (Flows)", + "group": "\ud50c\ub85c\uc6b0 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -7578,21 +7594,21 @@ ] }, { - "group": "도구", + "group": "\ub3c4\uad6c", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "코딩 도구", + "group": "\ucf54\ub529 \ub3c4\uad6c", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "고급", + "group": "\uace0\uae09", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -7600,7 +7616,7 @@ ] }, { - "group": "마이그레이션", + "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -7609,7 +7625,7 @@ ] }, { - "group": "핵심 개념", + "group": "\ud575\uc2ec \uac1c\ub150", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -7630,11 +7646,12 @@ "ko/concepts/testing", "ko/concepts/cli", "ko/concepts/tools", - "ko/concepts/event-listener" + "ko/concepts/event-listener", + "ko/concepts/checkpointing" ] }, { - "group": "MCP 통합", + "group": "MCP \ud1b5\ud569", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -7646,11 +7663,11 @@ ] }, { - "group": "도구 (Tools)", + "group": "\ub3c4\uad6c (Tools)", "pages": [ "ko/tools/overview", { - "group": "파일 & 문서", + "group": "\ud30c\uc77c & \ubb38\uc11c", "icon": "folder-open", "pages": [ 
"ko/tools/file-document/overview", @@ -7670,7 +7687,7 @@ ] }, { - "group": "웹 스크래핑 & 브라우징", + "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -7690,7 +7707,7 @@ ] }, { - "group": "검색 및 연구", + "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -7712,7 +7729,7 @@ ] }, { - "group": "데이터베이스 & 데이터", + "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -7727,7 +7744,7 @@ ] }, { - "group": "인공지능 & 머신러닝", + "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -7741,7 +7758,7 @@ ] }, { - "group": "클라우드 & 스토리지", + "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -7760,7 +7777,7 @@ ] }, { - "group": "자동화", + "group": "\uc790\ub3d9\ud654", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -7795,7 +7812,7 @@ ] }, { - "group": "학습", + "group": "\ud559\uc2b5", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -7832,17 +7849,17 @@ ] }, { - "tab": "엔터프라이즈", + "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", "icon": "briefcase", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "빌드", + "group": "\ube4c\ub4dc", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -7853,7 +7870,7 @@ ] }, { - "group": "운영", + "group": "\uc6b4\uc601", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -7862,13 +7879,13 @@ ] }, { - "group": "관리", + "group": "\uad00\ub9ac", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "통합 문서", + "group": "\ud1b5\ud569 \ubb38\uc11c", "pages": [ "ko/enterprise/integrations/asana", 
"ko/enterprise/integrations/box", @@ -7920,7 +7937,7 @@ ] }, { - "group": "트리거", + "group": "\ud2b8\ub9ac\uac70", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -7936,7 +7953,7 @@ ] }, { - "group": "학습 자원", + "group": "\ud559\uc2b5 \uc790\uc6d0", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -7944,11 +7961,11 @@ ] }, { - "tab": "API 레퍼런스", + "tab": "API \ub808\ud37c\ub7f0\uc2a4", "icon": "magnifying-glass", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -7960,11 +7977,11 @@ ] }, { - "tab": "예시", + "tab": "\uc608\uc2dc", "icon": "code", "groups": [ { - "group": "예시", + "group": "\uc608\uc2dc", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -7973,11 +7990,11 @@ ] }, { - "tab": "변경 로그", + "tab": "\ubcc0\uacbd \ub85c\uadf8", "icon": "clock", "groups": [ { - "group": "릴리스 노트", + "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", "pages": [ "ko/changelog" ] @@ -7990,11 +8007,11 @@ "version": "v1.12.2", "tabs": [ { - "tab": "홈", + "tab": "\ud648", "icon": "house", "groups": [ { - "group": "환영합니다", + "group": "\ud658\uc601\ud569\ub2c8\ub2e4", "pages": [ "ko/index" ] @@ -8002,11 +8019,11 @@ ] }, { - "tab": "기술 문서", + "tab": "\uae30\uc220 \ubb38\uc11c", "icon": "book-open", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/introduction", "ko/installation", @@ -8014,31 +8031,31 @@ ] }, { - "group": "가이드", + "group": "\uac00\uc774\ub4dc", "pages": [ { - "group": "전략", + "group": "\uc804\ub7b5", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "에이전트 (Agents)", + "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "크루 (Crews)", + "group": "\ud06c\ub8e8 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "플로우 
(Flows)", + "group": "\ud50c\ub85c\uc6b0 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -8046,21 +8063,21 @@ ] }, { - "group": "도구", + "group": "\ub3c4\uad6c", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "코딩 도구", + "group": "\ucf54\ub529 \ub3c4\uad6c", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "고급", + "group": "\uace0\uae09", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -8068,7 +8085,7 @@ ] }, { - "group": "마이그레이션", + "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -8077,7 +8094,7 @@ ] }, { - "group": "핵심 개념", + "group": "\ud575\uc2ec \uac1c\ub150", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -8098,11 +8115,12 @@ "ko/concepts/testing", "ko/concepts/cli", "ko/concepts/tools", - "ko/concepts/event-listener" + "ko/concepts/event-listener", + "ko/concepts/checkpointing" ] }, { - "group": "MCP 통합", + "group": "MCP \ud1b5\ud569", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -8114,11 +8132,11 @@ ] }, { - "group": "도구 (Tools)", + "group": "\ub3c4\uad6c (Tools)", "pages": [ "ko/tools/overview", { - "group": "파일 & 문서", + "group": "\ud30c\uc77c & \ubb38\uc11c", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -8138,7 +8156,7 @@ ] }, { - "group": "웹 스크래핑 & 브라우징", + "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -8158,7 +8176,7 @@ ] }, { - "group": "검색 및 연구", + "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -8180,7 +8198,7 @@ ] }, { - "group": "데이터베이스 & 데이터", + "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -8195,7 +8213,7 @@ ] }, { - "group": "인공지능 & 머신러닝", 
+ "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -8209,7 +8227,7 @@ ] }, { - "group": "클라우드 & 스토리지", + "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -8228,7 +8246,7 @@ ] }, { - "group": "자동화", + "group": "\uc790\ub3d9\ud654", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -8263,7 +8281,7 @@ ] }, { - "group": "학습", + "group": "\ud559\uc2b5", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -8300,17 +8318,17 @@ ] }, { - "tab": "엔터프라이즈", + "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", "icon": "briefcase", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "빌드", + "group": "\ube4c\ub4dc", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -8321,7 +8339,7 @@ ] }, { - "group": "운영", + "group": "\uc6b4\uc601", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -8330,13 +8348,13 @@ ] }, { - "group": "관리", + "group": "\uad00\ub9ac", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "통합 문서", + "group": "\ud1b5\ud569 \ubb38\uc11c", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -8388,7 +8406,7 @@ ] }, { - "group": "트리거", + "group": "\ud2b8\ub9ac\uac70", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -8404,7 +8422,7 @@ ] }, { - "group": "학습 자원", + "group": "\ud559\uc2b5 \uc790\uc6d0", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -8412,11 +8430,11 @@ ] }, { - "tab": "API 레퍼런스", + "tab": "API \ub808\ud37c\ub7f0\uc2a4", "icon": "magnifying-glass", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -8428,11 +8446,11 @@ ] 
}, { - "tab": "예시", + "tab": "\uc608\uc2dc", "icon": "code", "groups": [ { - "group": "예시", + "group": "\uc608\uc2dc", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -8441,11 +8459,11 @@ ] }, { - "tab": "변경 로그", + "tab": "\ubcc0\uacbd \ub85c\uadf8", "icon": "clock", "groups": [ { - "group": "릴리스 노트", + "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", "pages": [ "ko/changelog" ] @@ -8458,11 +8476,11 @@ "version": "v1.12.1", "tabs": [ { - "tab": "홈", + "tab": "\ud648", "icon": "house", "groups": [ { - "group": "환영합니다", + "group": "\ud658\uc601\ud569\ub2c8\ub2e4", "pages": [ "ko/index" ] @@ -8470,11 +8488,11 @@ ] }, { - "tab": "기술 문서", + "tab": "\uae30\uc220 \ubb38\uc11c", "icon": "book-open", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/introduction", "ko/installation", @@ -8482,31 +8500,31 @@ ] }, { - "group": "가이드", + "group": "\uac00\uc774\ub4dc", "pages": [ { - "group": "전략", + "group": "\uc804\ub7b5", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "에이전트 (Agents)", + "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "크루 (Crews)", + "group": "\ud06c\ub8e8 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "플로우 (Flows)", + "group": "\ud50c\ub85c\uc6b0 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -8514,21 +8532,21 @@ ] }, { - "group": "도구", + "group": "\ub3c4\uad6c", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "코딩 도구", + "group": "\ucf54\ub529 \ub3c4\uad6c", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "고급", + "group": "\uace0\uae09", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -8536,7 +8554,7 @@ ] }, { - "group": "마이그레이션", + "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", "icon": "shuffle", "pages": [ 
"ko/guides/migration/migrating-from-langgraph" @@ -8545,7 +8563,7 @@ ] }, { - "group": "핵심 개념", + "group": "\ud575\uc2ec \uac1c\ub150", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -8565,11 +8583,12 @@ "ko/concepts/testing", "ko/concepts/cli", "ko/concepts/tools", - "ko/concepts/event-listener" + "ko/concepts/event-listener", + "ko/concepts/checkpointing" ] }, { - "group": "MCP 통합", + "group": "MCP \ud1b5\ud569", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -8581,11 +8600,11 @@ ] }, { - "group": "도구 (Tools)", + "group": "\ub3c4\uad6c (Tools)", "pages": [ "ko/tools/overview", { - "group": "파일 & 문서", + "group": "\ud30c\uc77c & \ubb38\uc11c", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -8605,7 +8624,7 @@ ] }, { - "group": "웹 스크래핑 & 브라우징", + "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -8625,7 +8644,7 @@ ] }, { - "group": "검색 및 연구", + "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -8647,7 +8666,7 @@ ] }, { - "group": "데이터베이스 & 데이터", + "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -8662,7 +8681,7 @@ ] }, { - "group": "인공지능 & 머신러닝", + "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -8676,7 +8695,7 @@ ] }, { - "group": "클라우드 & 스토리지", + "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -8695,7 +8714,7 @@ ] }, { - "group": "자동화", + "group": "\uc790\ub3d9\ud654", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -8730,7 +8749,7 @@ ] }, { - "group": "학습", + "group": "\ud559\uc2b5", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -8767,17 +8786,17 @@ ] }, { - "tab": "엔터프라이즈", + "tab": 
"\uc5d4\ud130\ud504\ub77c\uc774\uc988", "icon": "briefcase", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "빌드", + "group": "\ube4c\ub4dc", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -8788,7 +8807,7 @@ ] }, { - "group": "운영", + "group": "\uc6b4\uc601", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -8797,13 +8816,13 @@ ] }, { - "group": "관리", + "group": "\uad00\ub9ac", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "통합 문서", + "group": "\ud1b5\ud569 \ubb38\uc11c", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -8855,7 +8874,7 @@ ] }, { - "group": "트리거", + "group": "\ud2b8\ub9ac\uac70", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -8871,7 +8890,7 @@ ] }, { - "group": "학습 자원", + "group": "\ud559\uc2b5 \uc790\uc6d0", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -8879,11 +8898,11 @@ ] }, { - "tab": "API 레퍼런스", + "tab": "API \ub808\ud37c\ub7f0\uc2a4", "icon": "magnifying-glass", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -8895,11 +8914,11 @@ ] }, { - "tab": "예시", + "tab": "\uc608\uc2dc", "icon": "code", "groups": [ { - "group": "예시", + "group": "\uc608\uc2dc", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -8908,11 +8927,11 @@ ] }, { - "tab": "변경 로그", + "tab": "\ubcc0\uacbd \ub85c\uadf8", "icon": "clock", "groups": [ { - "group": "릴리스 노트", + "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", "pages": [ "ko/changelog" ] @@ -8925,11 +8944,11 @@ "version": "v1.12.0", "tabs": [ { - "tab": "홈", + "tab": "\ud648", "icon": "house", "groups": [ { - "group": "환영합니다", + "group": "\ud658\uc601\ud569\ub2c8\ub2e4", "pages": [ "ko/index" ] @@ -8937,11 +8956,11 @@ ] }, { - "tab": 
"기술 문서", + "tab": "\uae30\uc220 \ubb38\uc11c", "icon": "book-open", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/introduction", "ko/installation", @@ -8949,31 +8968,31 @@ ] }, { - "group": "가이드", + "group": "\uac00\uc774\ub4dc", "pages": [ { - "group": "전략", + "group": "\uc804\ub7b5", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "에이전트 (Agents)", + "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "크루 (Crews)", + "group": "\ud06c\ub8e8 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "플로우 (Flows)", + "group": "\ud50c\ub85c\uc6b0 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -8981,21 +9000,21 @@ ] }, { - "group": "도구", + "group": "\ub3c4\uad6c", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "코딩 도구", + "group": "\ucf54\ub529 \ub3c4\uad6c", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "고급", + "group": "\uace0\uae09", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -9003,7 +9022,7 @@ ] }, { - "group": "마이그레이션", + "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -9012,7 +9031,7 @@ ] }, { - "group": "핵심 개념", + "group": "\ud575\uc2ec \uac1c\ub150", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -9032,11 +9051,12 @@ "ko/concepts/testing", "ko/concepts/cli", "ko/concepts/tools", - "ko/concepts/event-listener" + "ko/concepts/event-listener", + "ko/concepts/checkpointing" ] }, { - "group": "MCP 통합", + "group": "MCP \ud1b5\ud569", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -9048,11 +9068,11 @@ ] }, { - "group": "도구 (Tools)", + "group": "\ub3c4\uad6c (Tools)", "pages": [ "ko/tools/overview", { - "group": "파일 & 문서", + "group": 
"\ud30c\uc77c & \ubb38\uc11c", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -9072,7 +9092,7 @@ ] }, { - "group": "웹 스크래핑 & 브라우징", + "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -9092,7 +9112,7 @@ ] }, { - "group": "검색 및 연구", + "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -9114,7 +9134,7 @@ ] }, { - "group": "데이터베이스 & 데이터", + "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -9129,7 +9149,7 @@ ] }, { - "group": "인공지능 & 머신러닝", + "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -9143,7 +9163,7 @@ ] }, { - "group": "클라우드 & 스토리지", + "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -9162,7 +9182,7 @@ ] }, { - "group": "자동화", + "group": "\uc790\ub3d9\ud654", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -9197,7 +9217,7 @@ ] }, { - "group": "학습", + "group": "\ud559\uc2b5", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -9234,17 +9254,17 @@ ] }, { - "tab": "엔터프라이즈", + "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", "icon": "briefcase", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "빌드", + "group": "\ube4c\ub4dc", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -9255,7 +9275,7 @@ ] }, { - "group": "운영", + "group": "\uc6b4\uc601", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -9264,13 +9284,13 @@ ] }, { - "group": "관리", + "group": "\uad00\ub9ac", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "통합 문서", + "group": "\ud1b5\ud569 
\ubb38\uc11c", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -9322,7 +9342,7 @@ ] }, { - "group": "트리거", + "group": "\ud2b8\ub9ac\uac70", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -9338,7 +9358,7 @@ ] }, { - "group": "학습 자원", + "group": "\ud559\uc2b5 \uc790\uc6d0", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -9346,11 +9366,11 @@ ] }, { - "tab": "API 레퍼런스", + "tab": "API \ub808\ud37c\ub7f0\uc2a4", "icon": "magnifying-glass", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -9362,11 +9382,11 @@ ] }, { - "tab": "예시", + "tab": "\uc608\uc2dc", "icon": "code", "groups": [ { - "group": "예시", + "group": "\uc608\uc2dc", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -9375,11 +9395,11 @@ ] }, { - "tab": "변경 로그", + "tab": "\ubcc0\uacbd \ub85c\uadf8", "icon": "clock", "groups": [ { - "group": "릴리스 노트", + "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", "pages": [ "ko/changelog" ] @@ -9392,11 +9412,11 @@ "version": "v1.11.1", "tabs": [ { - "tab": "홈", + "tab": "\ud648", "icon": "house", "groups": [ { - "group": "환영합니다", + "group": "\ud658\uc601\ud569\ub2c8\ub2e4", "pages": [ "ko/index" ] @@ -9404,11 +9424,11 @@ ] }, { - "tab": "기술 문서", + "tab": "\uae30\uc220 \ubb38\uc11c", "icon": "book-open", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/introduction", "ko/installation", @@ -9416,31 +9436,31 @@ ] }, { - "group": "가이드", + "group": "\uac00\uc774\ub4dc", "pages": [ { - "group": "전략", + "group": "\uc804\ub7b5", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "에이전트 (Agents)", + "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "크루 (Crews)", + "group": "\ud06c\ub8e8 (Crews)", "icon": "users", 
"pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "플로우 (Flows)", + "group": "\ud50c\ub85c\uc6b0 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -9448,21 +9468,21 @@ ] }, { - "group": "도구", + "group": "\ub3c4\uad6c", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "코딩 도구", + "group": "\ucf54\ub529 \ub3c4\uad6c", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "고급", + "group": "\uace0\uae09", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -9470,7 +9490,7 @@ ] }, { - "group": "마이그레이션", + "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -9479,7 +9499,7 @@ ] }, { - "group": "핵심 개념", + "group": "\ud575\uc2ec \uac1c\ub150", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -9499,11 +9519,12 @@ "ko/concepts/testing", "ko/concepts/cli", "ko/concepts/tools", - "ko/concepts/event-listener" + "ko/concepts/event-listener", + "ko/concepts/checkpointing" ] }, { - "group": "MCP 통합", + "group": "MCP \ud1b5\ud569", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -9515,11 +9536,11 @@ ] }, { - "group": "도구 (Tools)", + "group": "\ub3c4\uad6c (Tools)", "pages": [ "ko/tools/overview", { - "group": "파일 & 문서", + "group": "\ud30c\uc77c & \ubb38\uc11c", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -9539,7 +9560,7 @@ ] }, { - "group": "웹 스크래핑 & 브라우징", + "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -9559,7 +9580,7 @@ ] }, { - "group": "검색 및 연구", + "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -9581,7 +9602,7 @@ ] }, { - "group": "데이터베이스 & 데이터", + "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", "icon": "database", "pages": [ 
"ko/tools/database-data/overview", @@ -9596,7 +9617,7 @@ ] }, { - "group": "인공지능 & 머신러닝", + "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -9610,7 +9631,7 @@ ] }, { - "group": "클라우드 & 스토리지", + "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -9629,7 +9650,7 @@ ] }, { - "group": "자동화", + "group": "\uc790\ub3d9\ud654", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -9664,7 +9685,7 @@ ] }, { - "group": "학습", + "group": "\ud559\uc2b5", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -9701,17 +9722,17 @@ ] }, { - "tab": "엔터프라이즈", + "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", "icon": "briefcase", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "빌드", + "group": "\ube4c\ub4dc", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -9722,7 +9743,7 @@ ] }, { - "group": "운영", + "group": "\uc6b4\uc601", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -9731,13 +9752,13 @@ ] }, { - "group": "관리", + "group": "\uad00\ub9ac", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "통합 문서", + "group": "\ud1b5\ud569 \ubb38\uc11c", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -9789,7 +9810,7 @@ ] }, { - "group": "트리거", + "group": "\ud2b8\ub9ac\uac70", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -9805,7 +9826,7 @@ ] }, { - "group": "학습 자원", + "group": "\ud559\uc2b5 \uc790\uc6d0", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -9813,11 +9834,11 @@ ] }, { - "tab": "API 레퍼런스", + "tab": "API \ub808\ud37c\ub7f0\uc2a4", "icon": "magnifying-glass", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": 
[ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -9829,11 +9850,11 @@ ] }, { - "tab": "예시", + "tab": "\uc608\uc2dc", "icon": "code", "groups": [ { - "group": "예시", + "group": "\uc608\uc2dc", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -9842,11 +9863,11 @@ ] }, { - "tab": "변경 로그", + "tab": "\ubcc0\uacbd \ub85c\uadf8", "icon": "clock", "groups": [ { - "group": "릴리스 노트", + "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", "pages": [ "ko/changelog" ] @@ -9859,11 +9880,11 @@ "version": "v1.11.0", "tabs": [ { - "tab": "홈", + "tab": "\ud648", "icon": "house", "groups": [ { - "group": "환영합니다", + "group": "\ud658\uc601\ud569\ub2c8\ub2e4", "pages": [ "ko/index" ] @@ -9871,11 +9892,11 @@ ] }, { - "tab": "기술 문서", + "tab": "\uae30\uc220 \ubb38\uc11c", "icon": "book-open", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/introduction", "ko/installation", @@ -9883,31 +9904,31 @@ ] }, { - "group": "가이드", + "group": "\uac00\uc774\ub4dc", "pages": [ { - "group": "전략", + "group": "\uc804\ub7b5", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "에이전트 (Agents)", + "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "크루 (Crews)", + "group": "\ud06c\ub8e8 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "플로우 (Flows)", + "group": "\ud50c\ub85c\uc6b0 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -9915,21 +9936,21 @@ ] }, { - "group": "도구", + "group": "\ub3c4\uad6c", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "코딩 도구", + "group": "\ucf54\ub529 \ub3c4\uad6c", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "고급", + "group": "\uace0\uae09", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -9937,7 +9958,7 @@ ] }, { - "group": "마이그레이션", + 
"group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -9946,7 +9967,7 @@ ] }, { - "group": "핵심 개념", + "group": "\ud575\uc2ec \uac1c\ub150", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -9965,11 +9986,12 @@ "ko/concepts/testing", "ko/concepts/cli", "ko/concepts/tools", - "ko/concepts/event-listener" + "ko/concepts/event-listener", + "ko/concepts/checkpointing" ] }, { - "group": "MCP 통합", + "group": "MCP \ud1b5\ud569", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -9981,11 +10003,11 @@ ] }, { - "group": "도구 (Tools)", + "group": "\ub3c4\uad6c (Tools)", "pages": [ "ko/tools/overview", { - "group": "파일 & 문서", + "group": "\ud30c\uc77c & \ubb38\uc11c", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -10005,7 +10027,7 @@ ] }, { - "group": "웹 스크래핑 & 브라우징", + "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -10025,7 +10047,7 @@ ] }, { - "group": "검색 및 연구", + "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -10047,7 +10069,7 @@ ] }, { - "group": "데이터베이스 & 데이터", + "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -10062,7 +10084,7 @@ ] }, { - "group": "인공지능 & 머신러닝", + "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -10076,7 +10098,7 @@ ] }, { - "group": "클라우드 & 스토리지", + "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -10095,7 +10117,7 @@ ] }, { - "group": "자동화", + "group": "\uc790\ub3d9\ud654", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -10130,7 +10152,7 @@ ] }, { - "group": "학습", + "group": "\ud559\uc2b5", "pages": [ 
"ko/learn/overview", "ko/learn/llm-selection-guide", @@ -10167,17 +10189,17 @@ ] }, { - "tab": "엔터프라이즈", + "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", "icon": "briefcase", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "빌드", + "group": "\ube4c\ub4dc", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -10188,7 +10210,7 @@ ] }, { - "group": "운영", + "group": "\uc6b4\uc601", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -10197,13 +10219,13 @@ ] }, { - "group": "관리", + "group": "\uad00\ub9ac", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "통합 문서", + "group": "\ud1b5\ud569 \ubb38\uc11c", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -10255,7 +10277,7 @@ ] }, { - "group": "트리거", + "group": "\ud2b8\ub9ac\uac70", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -10271,7 +10293,7 @@ ] }, { - "group": "학습 자원", + "group": "\ud559\uc2b5 \uc790\uc6d0", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -10279,11 +10301,11 @@ ] }, { - "tab": "API 레퍼런스", + "tab": "API \ub808\ud37c\ub7f0\uc2a4", "icon": "magnifying-glass", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -10295,11 +10317,11 @@ ] }, { - "tab": "예시", + "tab": "\uc608\uc2dc", "icon": "code", "groups": [ { - "group": "예시", + "group": "\uc608\uc2dc", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -10308,11 +10330,11 @@ ] }, { - "tab": "변경 로그", + "tab": "\ubcc0\uacbd \ub85c\uadf8", "icon": "clock", "groups": [ { - "group": "릴리스 노트", + "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", "pages": [ "ko/changelog" ] @@ -10325,11 +10347,11 @@ "version": "v1.10.1", "tabs": [ { - "tab": "홈", + "tab": "\ud648", "icon": "house", "groups": [ { 
- "group": "환영합니다", + "group": "\ud658\uc601\ud569\ub2c8\ub2e4", "pages": [ "ko/index" ] @@ -10337,11 +10359,11 @@ ] }, { - "tab": "기술 문서", + "tab": "\uae30\uc220 \ubb38\uc11c", "icon": "book-open", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/introduction", "ko/installation", @@ -10349,31 +10371,31 @@ ] }, { - "group": "가이드", + "group": "\uac00\uc774\ub4dc", "pages": [ { - "group": "전략", + "group": "\uc804\ub7b5", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "에이전트 (Agents)", + "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "크루 (Crews)", + "group": "\ud06c\ub8e8 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "플로우 (Flows)", + "group": "\ud50c\ub85c\uc6b0 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -10381,21 +10403,21 @@ ] }, { - "group": "도구", + "group": "\ub3c4\uad6c", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "코딩 도구", + "group": "\ucf54\ub529 \ub3c4\uad6c", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "고급", + "group": "\uace0\uae09", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -10403,7 +10425,7 @@ ] }, { - "group": "마이그레이션", + "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -10412,7 +10434,7 @@ ] }, { - "group": "핵심 개념", + "group": "\ud575\uc2ec \uac1c\ub150", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -10431,11 +10453,12 @@ "ko/concepts/testing", "ko/concepts/cli", "ko/concepts/tools", - "ko/concepts/event-listener" + "ko/concepts/event-listener", + "ko/concepts/checkpointing" ] }, { - "group": "MCP 통합", + "group": "MCP \ud1b5\ud569", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -10447,11 +10470,11 @@ ] 
}, { - "group": "도구 (Tools)", + "group": "\ub3c4\uad6c (Tools)", "pages": [ "ko/tools/overview", { - "group": "파일 & 문서", + "group": "\ud30c\uc77c & \ubb38\uc11c", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -10471,7 +10494,7 @@ ] }, { - "group": "웹 스크래핑 & 브라우징", + "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -10491,7 +10514,7 @@ ] }, { - "group": "검색 및 연구", + "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -10513,7 +10536,7 @@ ] }, { - "group": "데이터베이스 & 데이터", + "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -10528,7 +10551,7 @@ ] }, { - "group": "인공지능 & 머신러닝", + "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -10542,7 +10565,7 @@ ] }, { - "group": "클라우드 & 스토리지", + "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -10561,7 +10584,7 @@ ] }, { - "group": "자동화", + "group": "\uc790\ub3d9\ud654", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -10596,7 +10619,7 @@ ] }, { - "group": "학습", + "group": "\ud559\uc2b5", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -10633,17 +10656,17 @@ ] }, { - "tab": "엔터프라이즈", + "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", "icon": "briefcase", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "빌드", + "group": "\ube4c\ub4dc", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -10654,7 +10677,7 @@ ] }, { - "group": "운영", + "group": "\uc6b4\uc601", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -10663,13 
+10686,13 @@ ] }, { - "group": "관리", + "group": "\uad00\ub9ac", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "통합 문서", + "group": "\ud1b5\ud569 \ubb38\uc11c", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -10721,7 +10744,7 @@ ] }, { - "group": "트리거", + "group": "\ud2b8\ub9ac\uac70", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -10737,7 +10760,7 @@ ] }, { - "group": "학습 자원", + "group": "\ud559\uc2b5 \uc790\uc6d0", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -10745,11 +10768,11 @@ ] }, { - "tab": "API 레퍼런스", + "tab": "API \ub808\ud37c\ub7f0\uc2a4", "icon": "magnifying-glass", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -10761,11 +10784,11 @@ ] }, { - "tab": "예시", + "tab": "\uc608\uc2dc", "icon": "code", "groups": [ { - "group": "예시", + "group": "\uc608\uc2dc", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -10774,11 +10797,11 @@ ] }, { - "tab": "변경 로그", + "tab": "\ubcc0\uacbd \ub85c\uadf8", "icon": "clock", "groups": [ { - "group": "릴리스 노트", + "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", "pages": [ "ko/changelog" ] @@ -10791,11 +10814,11 @@ "version": "v1.10.0", "tabs": [ { - "tab": "홈", + "tab": "\ud648", "icon": "house", "groups": [ { - "group": "환영합니다", + "group": "\ud658\uc601\ud569\ub2c8\ub2e4", "pages": [ "ko/index" ] @@ -10803,11 +10826,11 @@ ] }, { - "tab": "기술 문서", + "tab": "\uae30\uc220 \ubb38\uc11c", "icon": "book-open", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/introduction", "ko/installation", @@ -10815,31 +10838,31 @@ ] }, { - "group": "가이드", + "group": "\uac00\uc774\ub4dc", "pages": [ { - "group": "전략", + "group": "\uc804\ub7b5", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "에이전트 (Agents)", + "group": 
"\uc5d0\uc774\uc804\ud2b8 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "크루 (Crews)", + "group": "\ud06c\ub8e8 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "플로우 (Flows)", + "group": "\ud50c\ub85c\uc6b0 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -10847,21 +10870,21 @@ ] }, { - "group": "도구", + "group": "\ub3c4\uad6c", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "코딩 도구", + "group": "\ucf54\ub529 \ub3c4\uad6c", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "고급", + "group": "\uace0\uae09", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -10869,7 +10892,7 @@ ] }, { - "group": "마이그레이션", + "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -10878,7 +10901,7 @@ ] }, { - "group": "핵심 개념", + "group": "\ud575\uc2ec \uac1c\ub150", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -10898,11 +10921,12 @@ "ko/concepts/testing", "ko/concepts/cli", "ko/concepts/tools", - "ko/concepts/event-listener" + "ko/concepts/event-listener", + "ko/concepts/checkpointing" ] }, { - "group": "MCP 통합", + "group": "MCP \ud1b5\ud569", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -10914,11 +10938,11 @@ ] }, { - "group": "도구 (Tools)", + "group": "\ub3c4\uad6c (Tools)", "pages": [ "ko/tools/overview", { - "group": "파일 & 문서", + "group": "\ud30c\uc77c & \ubb38\uc11c", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -10938,7 +10962,7 @@ ] }, { - "group": "웹 스크래핑 & 브라우징", + "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -10958,7 +10982,7 @@ ] }, { - "group": "검색 및 연구", + "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", "icon": "magnifying-glass", "pages": [ 
"ko/tools/search-research/overview", @@ -10980,7 +11004,7 @@ ] }, { - "group": "데이터베이스 & 데이터", + "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -10995,7 +11019,7 @@ ] }, { - "group": "인공지능 & 머신러닝", + "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -11009,7 +11033,7 @@ ] }, { - "group": "클라우드 & 스토리지", + "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -11028,7 +11052,7 @@ ] }, { - "group": "자동화", + "group": "\uc790\ub3d9\ud654", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -11063,7 +11087,7 @@ ] }, { - "group": "학습", + "group": "\ud559\uc2b5", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -11100,17 +11124,17 @@ ] }, { - "tab": "엔터프라이즈", + "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", "icon": "briefcase", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "빌드", + "group": "\ube4c\ub4dc", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -11121,7 +11145,7 @@ ] }, { - "group": "운영", + "group": "\uc6b4\uc601", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -11130,13 +11154,13 @@ ] }, { - "group": "관리", + "group": "\uad00\ub9ac", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "통합 문서", + "group": "\ud1b5\ud569 \ubb38\uc11c", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -11188,7 +11212,7 @@ ] }, { - "group": "트리거", + "group": "\ud2b8\ub9ac\uac70", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -11204,7 +11228,7 @@ ] }, { - "group": "학습 자원", + "group": "\ud559\uc2b5 \uc790\uc6d0", "pages": [ 
"ko/enterprise/resources/frequently-asked-questions" ] @@ -11212,11 +11236,11 @@ ] }, { - "tab": "API 레퍼런스", + "tab": "API \ub808\ud37c\ub7f0\uc2a4", "icon": "magnifying-glass", "groups": [ { - "group": "시작 안내", + "group": "\uc2dc\uc791 \uc548\ub0b4", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -11228,11 +11252,11 @@ ] }, { - "tab": "예시", + "tab": "\uc608\uc2dc", "icon": "code", "groups": [ { - "group": "예시", + "group": "\uc608\uc2dc", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -11241,11 +11265,11 @@ ] }, { - "tab": "변경 로그", + "tab": "\ubcc0\uacbd \ub85c\uadf8", "icon": "clock", "groups": [ { - "group": "릴리스 노트", + "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", "pages": [ "ko/changelog" ] @@ -11261,17 +11285,17 @@ "global": { "anchors": [ { - "anchor": "الموقع", + "anchor": "\u0627\u0644\u0645\u0648\u0642\u0639", "href": "https://crewai.com", "icon": "globe" }, { - "anchor": "المنتدى", + "anchor": "\u0627\u0644\u0645\u0646\u062a\u062f\u0649", "href": "https://community.crewai.com", "icon": "discourse" }, { - "anchor": "المدوّنة", + "anchor": "\u0627\u0644\u0645\u062f\u0648\u0651\u0646\u0629", "href": "https://blog.crewai.com", "icon": "newspaper" }, @@ -11288,11 +11312,11 @@ "default": true, "tabs": [ { - "tab": "الرئيسية", + "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", "icon": "house", "groups": [ { - "group": "مرحباً", + "group": "\u0645\u0631\u062d\u0628\u0627\u064b", "pages": [ "ar/index" ] @@ -11300,11 +11324,11 @@ ] }, { - "tab": "التقنية التوثيق", + "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", "icon": "book-open", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/introduction", "ar/installation", @@ -11312,31 +11336,31 @@ ] }, { - "group": "الأدلّة", + "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", "pages": [ { - "group": "الاستراتيجية", + "group": 
"\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "الوكلاء", + "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "الطواقم", + "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "التدفقات", + "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -11344,21 +11368,21 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "أدوات البرمجة", + "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "متقدّم", + "group": "\u0645\u062a\u0642\u062f\u0651\u0645", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -11366,7 +11390,7 @@ ] }, { - "group": "الترحيل", + "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -11375,7 +11399,7 @@ ] }, { - "group": "المفاهيم الأساسية", + "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", "pages": [ "ar/concepts/agents", "ar/concepts/agent-capabilities", @@ -11396,11 +11420,12 @@ "ar/concepts/testing", "ar/concepts/cli", "ar/concepts/tools", - "ar/concepts/event-listener" + "ar/concepts/event-listener", + "ar/concepts/checkpointing" ] }, { - "group": "تكامل MCP", + "group": "\u062a\u0643\u0627\u0645\u0644 MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -11412,11 +11437,11 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "pages": [ 
"ar/tools/overview", { - "group": "الملفات والمستندات", + "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -11436,7 +11461,7 @@ ] }, { - "group": "استخراج بيانات الويب", + "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -11456,7 +11481,7 @@ ] }, { - "group": "البحث والاستكشاف", + "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -11478,7 +11503,7 @@ ] }, { - "group": "قواعد البيانات", + "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -11493,7 +11518,7 @@ ] }, { - "group": "الذكاء الاصطناعي والتعلّم الآلي", + "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -11507,7 +11532,7 @@ ] }, { - "group": "التخزين السحابي", + "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -11526,7 +11551,7 @@ ] }, { - "group": "الأتمتة", + "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -11561,7 +11586,7 @@ ] }, { - "group": "التعلّم", + "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -11598,17 +11623,17 @@ ] }, { - "tab": "المؤسسات", + "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", "icon": "briefcase", "groups": [ { - 
"group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "البناء", + "group": "\u0627\u0644\u0628\u0646\u0627\u0621", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -11619,7 +11644,7 @@ ] }, { - "group": "العمليات", + "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -11628,13 +11653,13 @@ ] }, { - "group": "الإدارة", + "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "التكاملات", + "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -11686,7 +11711,7 @@ ] }, { - "group": "المشغّلات", + "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -11702,7 +11727,7 @@ ] }, { - "group": "موارد التعلّم", + "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -11710,11 +11735,11 @@ ] }, { - "tab": "API المرجع", + "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", "icon": "magnifying-glass", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -11726,11 +11751,11 @@ ] }, { - "tab": "أمثلة", + "tab": "\u0623\u0645\u062b\u0644\u0629", "icon": "code", "groups": [ { - "group": "أمثلة", + "group": "\u0623\u0645\u062b\u0644\u0629", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -11739,11 +11764,11 @@ ] }, { - "tab": "التغييرات السجلات", + "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", "icon": "clock", "groups": [ { - "group": "سجل 
التغييرات", + "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", "pages": [ "ar/changelog" ] @@ -11756,11 +11781,11 @@ "version": "v1.12.2", "tabs": [ { - "tab": "الرئيسية", + "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", "icon": "house", "groups": [ { - "group": "مرحباً", + "group": "\u0645\u0631\u062d\u0628\u0627\u064b", "pages": [ "ar/index" ] @@ -11768,11 +11793,11 @@ ] }, { - "tab": "التقنية التوثيق", + "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", "icon": "book-open", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/introduction", "ar/installation", @@ -11780,31 +11805,31 @@ ] }, { - "group": "الأدلّة", + "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", "pages": [ { - "group": "الاستراتيجية", + "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "الوكلاء", + "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "الطواقم", + "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "التدفقات", + "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -11812,21 +11837,21 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "أدوات البرمجة", + "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "متقدّم", + "group": "\u0645\u062a\u0642\u062f\u0651\u0645", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ 
-11834,7 +11859,7 @@ ] }, { - "group": "الترحيل", + "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -11843,7 +11868,7 @@ ] }, { - "group": "المفاهيم الأساسية", + "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", "pages": [ "ar/concepts/agents", "ar/concepts/agent-capabilities", @@ -11864,11 +11889,12 @@ "ar/concepts/testing", "ar/concepts/cli", "ar/concepts/tools", - "ar/concepts/event-listener" + "ar/concepts/event-listener", + "ar/concepts/checkpointing" ] }, { - "group": "تكامل MCP", + "group": "\u062a\u0643\u0627\u0645\u0644 MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -11880,11 +11906,11 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "pages": [ "ar/tools/overview", { - "group": "الملفات والمستندات", + "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -11904,7 +11930,7 @@ ] }, { - "group": "استخراج بيانات الويب", + "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -11924,7 +11950,7 @@ ] }, { - "group": "البحث والاستكشاف", + "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -11946,7 +11972,7 @@ ] }, { - "group": "قواعد البيانات", + "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -11961,7 +11987,7 @@ ] }, { - "group": "الذكاء الاصطناعي والتعلّم الآلي", + "group": "\u0627\u0644\u0630\u0643\u0627\u0621 
\u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -11975,7 +12001,7 @@ ] }, { - "group": "التخزين السحابي", + "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -11994,7 +12020,7 @@ ] }, { - "group": "الأتمتة", + "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -12029,7 +12055,7 @@ ] }, { - "group": "التعلّم", + "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -12066,17 +12092,17 @@ ] }, { - "tab": "المؤسسات", + "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", "icon": "briefcase", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "البناء", + "group": "\u0627\u0644\u0628\u0646\u0627\u0621", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -12087,7 +12113,7 @@ ] }, { - "group": "العمليات", + "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -12096,13 +12122,13 @@ ] }, { - "group": "الإدارة", + "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "التكاملات", + "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -12154,7 +12180,7 @@ ] }, { - "group": "المشغّلات", + "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -12170,7 +12196,7 @@ ] }, { - "group": "موارد التعلّم", + 
"group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -12178,11 +12204,11 @@ ] }, { - "tab": "API المرجع", + "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", "icon": "magnifying-glass", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -12194,11 +12220,11 @@ ] }, { - "tab": "أمثلة", + "tab": "\u0623\u0645\u062b\u0644\u0629", "icon": "code", "groups": [ { - "group": "أمثلة", + "group": "\u0623\u0645\u062b\u0644\u0629", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -12207,11 +12233,11 @@ ] }, { - "tab": "التغييرات السجلات", + "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", "icon": "clock", "groups": [ { - "group": "سجل التغييرات", + "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", "pages": [ "ar/changelog" ] @@ -12224,11 +12250,11 @@ "version": "v1.12.1", "tabs": [ { - "tab": "الرئيسية", + "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", "icon": "house", "groups": [ { - "group": "مرحباً", + "group": "\u0645\u0631\u062d\u0628\u0627\u064b", "pages": [ "ar/index" ] @@ -12236,11 +12262,11 @@ ] }, { - "tab": "التقنية التوثيق", + "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", "icon": "book-open", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/introduction", "ar/installation", @@ -12248,31 +12274,31 @@ ] }, { - "group": "الأدلّة", + "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", "pages": [ { - "group": "الاستراتيجية", + "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "الوكلاء", + "group": 
"\u0627\u0644\u0648\u0643\u0644\u0627\u0621", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "الطواقم", + "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "التدفقات", + "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -12280,21 +12306,21 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "أدوات البرمجة", + "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "متقدّم", + "group": "\u0645\u062a\u0642\u062f\u0651\u0645", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -12302,7 +12328,7 @@ ] }, { - "group": "الترحيل", + "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -12311,7 +12337,7 @@ ] }, { - "group": "المفاهيم الأساسية", + "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -12331,11 +12357,12 @@ "ar/concepts/testing", "ar/concepts/cli", "ar/concepts/tools", - "ar/concepts/event-listener" + "ar/concepts/event-listener", + "ar/concepts/checkpointing" ] }, { - "group": "تكامل MCP", + "group": "\u062a\u0643\u0627\u0645\u0644 MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -12347,11 +12374,11 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "pages": [ "ar/tools/overview", { - "group": "الملفات والمستندات", + "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", "icon": "folder-open", "pages": [ 
"ar/tools/file-document/overview", @@ -12371,7 +12398,7 @@ ] }, { - "group": "استخراج بيانات الويب", + "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -12391,7 +12418,7 @@ ] }, { - "group": "البحث والاستكشاف", + "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -12413,7 +12440,7 @@ ] }, { - "group": "قواعد البيانات", + "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -12428,7 +12455,7 @@ ] }, { - "group": "الذكاء الاصطناعي والتعلّم الآلي", + "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -12442,7 +12469,7 @@ ] }, { - "group": "التخزين السحابي", + "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -12461,7 +12488,7 @@ ] }, { - "group": "الأتمتة", + "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -12496,7 +12523,7 @@ ] }, { - "group": "التعلّم", + "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -12533,17 +12560,17 @@ ] }, { - "tab": "المؤسسات", + "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", "icon": "briefcase", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "البناء", + "group": "\u0627\u0644\u0628\u0646\u0627\u0621", "pages": [ 
"ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -12554,7 +12581,7 @@ ] }, { - "group": "العمليات", + "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -12563,13 +12590,13 @@ ] }, { - "group": "الإدارة", + "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "التكاملات", + "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -12621,7 +12648,7 @@ ] }, { - "group": "المشغّلات", + "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -12637,7 +12664,7 @@ ] }, { - "group": "موارد التعلّم", + "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -12645,11 +12672,11 @@ ] }, { - "tab": "API المرجع", + "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", "icon": "magnifying-glass", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -12661,11 +12688,11 @@ ] }, { - "tab": "أمثلة", + "tab": "\u0623\u0645\u062b\u0644\u0629", "icon": "code", "groups": [ { - "group": "أمثلة", + "group": "\u0623\u0645\u062b\u0644\u0629", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -12674,11 +12701,11 @@ ] }, { - "tab": "التغييرات السجلات", + "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", "icon": "clock", "groups": [ { - "group": "سجل التغييرات", + "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", "pages": [ "ar/changelog" ] @@ -12691,11 +12718,11 @@ "version": "v1.12.0", "tabs": [ { - "tab": 
"الرئيسية", + "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", "icon": "house", "groups": [ { - "group": "مرحباً", + "group": "\u0645\u0631\u062d\u0628\u0627\u064b", "pages": [ "ar/index" ] @@ -12703,11 +12730,11 @@ ] }, { - "tab": "التقنية التوثيق", + "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", "icon": "book-open", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/introduction", "ar/installation", @@ -12715,31 +12742,31 @@ ] }, { - "group": "الأدلّة", + "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", "pages": [ { - "group": "الاستراتيجية", + "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "الوكلاء", + "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "الطواقم", + "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "التدفقات", + "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -12747,21 +12774,21 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "أدوات البرمجة", + "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "متقدّم", + "group": "\u0645\u062a\u0642\u062f\u0651\u0645", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -12769,7 +12796,7 @@ ] }, { - "group": "الترحيل", + "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -12778,7 
+12805,7 @@ ] }, { - "group": "المفاهيم الأساسية", + "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -12798,11 +12825,12 @@ "ar/concepts/testing", "ar/concepts/cli", "ar/concepts/tools", - "ar/concepts/event-listener" + "ar/concepts/event-listener", + "ar/concepts/checkpointing" ] }, { - "group": "تكامل MCP", + "group": "\u062a\u0643\u0627\u0645\u0644 MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -12814,11 +12842,11 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "pages": [ "ar/tools/overview", { - "group": "الملفات والمستندات", + "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -12838,7 +12866,7 @@ ] }, { - "group": "استخراج بيانات الويب", + "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -12858,7 +12886,7 @@ ] }, { - "group": "البحث والاستكشاف", + "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -12880,7 +12908,7 @@ ] }, { - "group": "قواعد البيانات", + "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -12895,7 +12923,7 @@ ] }, { - "group": "الذكاء الاصطناعي والتعلّم الآلي", + "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -12909,7 +12937,7 @@ ] }, { - "group": "التخزين السحابي", + 
"group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -12928,7 +12956,7 @@ ] }, { - "group": "الأتمتة", + "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -12963,7 +12991,7 @@ ] }, { - "group": "التعلّم", + "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -13000,17 +13028,17 @@ ] }, { - "tab": "المؤسسات", + "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", "icon": "briefcase", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "البناء", + "group": "\u0627\u0644\u0628\u0646\u0627\u0621", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -13021,7 +13049,7 @@ ] }, { - "group": "العمليات", + "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -13030,13 +13058,13 @@ ] }, { - "group": "الإدارة", + "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "التكاملات", + "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -13088,7 +13116,7 @@ ] }, { - "group": "المشغّلات", + "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -13104,7 +13132,7 @@ ] }, { - "group": "موارد التعلّم", + "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -13112,11 +13140,11 @@ ] }, { - "tab": "API المرجع", + "tab": "API 
\u0627\u0644\u0645\u0631\u062c\u0639", "icon": "magnifying-glass", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -13128,11 +13156,11 @@ ] }, { - "tab": "أمثلة", + "tab": "\u0623\u0645\u062b\u0644\u0629", "icon": "code", "groups": [ { - "group": "أمثلة", + "group": "\u0623\u0645\u062b\u0644\u0629", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -13141,11 +13169,11 @@ ] }, { - "tab": "التغييرات السجلات", + "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", "icon": "clock", "groups": [ { - "group": "سجل التغييرات", + "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", "pages": [ "ar/changelog" ] @@ -13158,11 +13186,11 @@ "version": "v1.11.1", "tabs": [ { - "tab": "الرئيسية", + "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", "icon": "house", "groups": [ { - "group": "مرحباً", + "group": "\u0645\u0631\u062d\u0628\u0627\u064b", "pages": [ "ar/index" ] @@ -13170,11 +13198,11 @@ ] }, { - "tab": "التقنية التوثيق", + "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", "icon": "book-open", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/introduction", "ar/installation", @@ -13182,31 +13210,31 @@ ] }, { - "group": "الأدلّة", + "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", "pages": [ { - "group": "الاستراتيجية", + "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "الوكلاء", + "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "الطواقم", + "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", "icon": "users", "pages": [ 
"ar/guides/crews/first-crew" ] }, { - "group": "التدفقات", + "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -13214,21 +13242,21 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "أدوات البرمجة", + "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "متقدّم", + "group": "\u0645\u062a\u0642\u062f\u0651\u0645", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -13236,7 +13264,7 @@ ] }, { - "group": "الترحيل", + "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -13245,7 +13273,7 @@ ] }, { - "group": "المفاهيم الأساسية", + "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -13265,11 +13293,12 @@ "ar/concepts/testing", "ar/concepts/cli", "ar/concepts/tools", - "ar/concepts/event-listener" + "ar/concepts/event-listener", + "ar/concepts/checkpointing" ] }, { - "group": "تكامل MCP", + "group": "\u062a\u0643\u0627\u0645\u0644 MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -13281,11 +13310,11 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "pages": [ "ar/tools/overview", { - "group": "الملفات والمستندات", + "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -13305,7 +13334,7 @@ ] }, { - "group": "استخراج بيانات الويب", + "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", 
"icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -13325,7 +13354,7 @@ ] }, { - "group": "البحث والاستكشاف", + "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -13347,7 +13376,7 @@ ] }, { - "group": "قواعد البيانات", + "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -13362,7 +13391,7 @@ ] }, { - "group": "الذكاء الاصطناعي والتعلّم الآلي", + "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -13376,7 +13405,7 @@ ] }, { - "group": "التخزين السحابي", + "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -13395,7 +13424,7 @@ ] }, { - "group": "الأتمتة", + "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -13430,7 +13459,7 @@ ] }, { - "group": "التعلّم", + "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -13467,17 +13496,17 @@ ] }, { - "tab": "المؤسسات", + "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", "icon": "briefcase", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "البناء", + "group": "\u0627\u0644\u0628\u0646\u0627\u0621", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -13488,7 +13517,7 @@ ] }, { - "group": "العمليات", + "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", "pages": [ "ar/enterprise/features/traces", 
"ar/enterprise/features/webhook-streaming", @@ -13497,13 +13526,13 @@ ] }, { - "group": "الإدارة", + "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "التكاملات", + "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -13555,7 +13584,7 @@ ] }, { - "group": "المشغّلات", + "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -13571,7 +13600,7 @@ ] }, { - "group": "موارد التعلّم", + "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -13579,11 +13608,11 @@ ] }, { - "tab": "API المرجع", + "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", "icon": "magnifying-glass", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -13595,11 +13624,11 @@ ] }, { - "tab": "أمثلة", + "tab": "\u0623\u0645\u062b\u0644\u0629", "icon": "code", "groups": [ { - "group": "أمثلة", + "group": "\u0623\u0645\u062b\u0644\u0629", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -13608,11 +13637,11 @@ ] }, { - "tab": "التغييرات السجلات", + "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", "icon": "clock", "groups": [ { - "group": "سجل التغييرات", + "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", "pages": [ "ar/changelog" ] @@ -13625,11 +13654,11 @@ "version": "v1.11.0", "tabs": [ { - "tab": "الرئيسية", + "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", "icon": "house", "groups": [ { - "group": "مرحباً", + "group": "\u0645\u0631\u062d\u0628\u0627\u064b", "pages": [ "ar/index" ] @@ -13637,11 +13666,11 @@ ] }, { - 
"tab": "التقنية التوثيق", + "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", "icon": "book-open", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/introduction", "ar/installation", @@ -13649,31 +13678,31 @@ ] }, { - "group": "الأدلّة", + "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", "pages": [ { - "group": "الاستراتيجية", + "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "الوكلاء", + "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "الطواقم", + "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "التدفقات", + "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -13681,21 +13710,21 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "أدوات البرمجة", + "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "متقدّم", + "group": "\u0645\u062a\u0642\u062f\u0651\u0645", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -13703,7 +13732,7 @@ ] }, { - "group": "الترحيل", + "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -13712,7 +13741,7 @@ ] }, { - "group": "المفاهيم الأساسية", + "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -13731,11 +13760,12 
@@ "ar/concepts/testing", "ar/concepts/cli", "ar/concepts/tools", - "ar/concepts/event-listener" + "ar/concepts/event-listener", + "ar/concepts/checkpointing" ] }, { - "group": "تكامل MCP", + "group": "\u062a\u0643\u0627\u0645\u0644 MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -13747,11 +13777,11 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "pages": [ "ar/tools/overview", { - "group": "الملفات والمستندات", + "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -13771,7 +13801,7 @@ ] }, { - "group": "استخراج بيانات الويب", + "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -13791,7 +13821,7 @@ ] }, { - "group": "البحث والاستكشاف", + "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -13813,7 +13843,7 @@ ] }, { - "group": "قواعد البيانات", + "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -13828,7 +13858,7 @@ ] }, { - "group": "الذكاء الاصطناعي والتعلّم الآلي", + "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -13842,7 +13872,7 @@ ] }, { - "group": "التخزين السحابي", + "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -13861,7 +13891,7 @@ ] }, { - "group": "الأتمتة", + "group": 
"\u0627\u0644\u0623\u062a\u0645\u062a\u0629", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -13896,7 +13926,7 @@ ] }, { - "group": "التعلّم", + "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -13933,17 +13963,17 @@ ] }, { - "tab": "المؤسسات", + "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", "icon": "briefcase", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "البناء", + "group": "\u0627\u0644\u0628\u0646\u0627\u0621", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -13954,7 +13984,7 @@ ] }, { - "group": "العمليات", + "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -13963,13 +13993,13 @@ ] }, { - "group": "الإدارة", + "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "التكاملات", + "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -14021,7 +14051,7 @@ ] }, { - "group": "المشغّلات", + "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -14037,7 +14067,7 @@ ] }, { - "group": "موارد التعلّم", + "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -14045,11 +14075,11 @@ ] }, { - "tab": "API المرجع", + "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", "icon": "magnifying-glass", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -14061,11 +14091,11 @@ ] }, { - "tab": 
"أمثلة", + "tab": "\u0623\u0645\u062b\u0644\u0629", "icon": "code", "groups": [ { - "group": "أمثلة", + "group": "\u0623\u0645\u062b\u0644\u0629", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -14074,11 +14104,11 @@ ] }, { - "tab": "التغييرات السجلات", + "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", "icon": "clock", "groups": [ { - "group": "سجل التغييرات", + "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", "pages": [ "ar/changelog" ] @@ -14091,11 +14121,11 @@ "version": "v1.10.1", "tabs": [ { - "tab": "الرئيسية", + "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", "icon": "house", "groups": [ { - "group": "مرحباً", + "group": "\u0645\u0631\u062d\u0628\u0627\u064b", "pages": [ "ar/index" ] @@ -14103,11 +14133,11 @@ ] }, { - "tab": "التقنية التوثيق", + "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", "icon": "book-open", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/introduction", "ar/installation", @@ -14115,31 +14145,31 @@ ] }, { - "group": "الأدلّة", + "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", "pages": [ { - "group": "الاستراتيجية", + "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "الوكلاء", + "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "الطواقم", + "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "التدفقات", + "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -14147,21 +14177,21 @@ ] }, { - "group": "الأدوات", + "group": 
"\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "أدوات البرمجة", + "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "متقدّم", + "group": "\u0645\u062a\u0642\u062f\u0651\u0645", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -14169,7 +14199,7 @@ ] }, { - "group": "الترحيل", + "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -14178,7 +14208,7 @@ ] }, { - "group": "المفاهيم الأساسية", + "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -14197,11 +14227,12 @@ "ar/concepts/testing", "ar/concepts/cli", "ar/concepts/tools", - "ar/concepts/event-listener" + "ar/concepts/event-listener", + "ar/concepts/checkpointing" ] }, { - "group": "تكامل MCP", + "group": "\u062a\u0643\u0627\u0645\u0644 MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -14213,11 +14244,11 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "pages": [ "ar/tools/overview", { - "group": "الملفات والمستندات", + "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -14237,7 +14268,7 @@ ] }, { - "group": "استخراج بيانات الويب", + "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -14257,7 +14288,7 @@ ] }, { - "group": "البحث والاستكشاف", + "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", "icon": "magnifying-glass", 
"pages": [ "ar/tools/search-research/overview", @@ -14279,7 +14310,7 @@ ] }, { - "group": "قواعد البيانات", + "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -14294,7 +14325,7 @@ ] }, { - "group": "الذكاء الاصطناعي والتعلّم الآلي", + "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -14308,7 +14339,7 @@ ] }, { - "group": "التخزين السحابي", + "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -14327,7 +14358,7 @@ ] }, { - "group": "الأتمتة", + "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -14362,7 +14393,7 @@ ] }, { - "group": "التعلّم", + "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -14399,17 +14430,17 @@ ] }, { - "tab": "المؤسسات", + "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", "icon": "briefcase", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "البناء", + "group": "\u0627\u0644\u0628\u0646\u0627\u0621", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -14420,7 +14451,7 @@ ] }, { - "group": "العمليات", + "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -14429,13 +14460,13 @@ ] }, { - "group": "الإدارة", + "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "التكاملات", + "group": 
"\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -14487,7 +14518,7 @@ ] }, { - "group": "المشغّلات", + "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -14503,7 +14534,7 @@ ] }, { - "group": "موارد التعلّم", + "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -14511,11 +14542,11 @@ ] }, { - "tab": "API المرجع", + "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", "icon": "magnifying-glass", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -14527,11 +14558,11 @@ ] }, { - "tab": "أمثلة", + "tab": "\u0623\u0645\u062b\u0644\u0629", "icon": "code", "groups": [ { - "group": "أمثلة", + "group": "\u0623\u0645\u062b\u0644\u0629", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -14540,11 +14571,11 @@ ] }, { - "tab": "التغييرات السجلات", + "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", "icon": "clock", "groups": [ { - "group": "سجل التغييرات", + "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", "pages": [ "ar/changelog" ] @@ -14557,11 +14588,11 @@ "version": "v1.10.0", "tabs": [ { - "tab": "الرئيسية", + "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", "icon": "house", "groups": [ { - "group": "مرحباً", + "group": "\u0645\u0631\u062d\u0628\u0627\u064b", "pages": [ "ar/index" ] @@ -14569,11 +14600,11 @@ ] }, { - "tab": "التقنية التوثيق", + "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", "icon": "book-open", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ 
"ar/introduction", "ar/installation", @@ -14581,31 +14612,31 @@ ] }, { - "group": "الأدلّة", + "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", "pages": [ { - "group": "الاستراتيجية", + "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "الوكلاء", + "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "الطواقم", + "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "التدفقات", + "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -14613,21 +14644,21 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "أدوات البرمجة", + "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "متقدّم", + "group": "\u0645\u062a\u0642\u062f\u0651\u0645", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -14635,7 +14666,7 @@ ] }, { - "group": "الترحيل", + "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -14644,7 +14675,7 @@ ] }, { - "group": "المفاهيم الأساسية", + "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -14664,11 +14695,12 @@ "ar/concepts/testing", "ar/concepts/cli", "ar/concepts/tools", - "ar/concepts/event-listener" + "ar/concepts/event-listener", + "ar/concepts/checkpointing" ] }, { - "group": "تكامل MCP", + "group": "\u062a\u0643\u0627\u0645\u0644 
MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -14680,11 +14712,11 @@ ] }, { - "group": "الأدوات", + "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", "pages": [ "ar/tools/overview", { - "group": "الملفات والمستندات", + "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -14704,7 +14736,7 @@ ] }, { - "group": "استخراج بيانات الويب", + "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -14724,7 +14756,7 @@ ] }, { - "group": "البحث والاستكشاف", + "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -14746,7 +14778,7 @@ ] }, { - "group": "قواعد البيانات", + "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -14761,7 +14793,7 @@ ] }, { - "group": "الذكاء الاصطناعي والتعلّم الآلي", + "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -14775,7 +14807,7 @@ ] }, { - "group": "التخزين السحابي", + "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -14794,7 +14826,7 @@ ] }, { - "group": "الأتمتة", + "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -14829,7 +14861,7 @@ ] }, { - "group": "التعلّم", + "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/learn/overview", 
"ar/learn/llm-selection-guide", @@ -14866,17 +14898,17 @@ ] }, { - "tab": "المؤسسات", + "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", "icon": "briefcase", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "البناء", + "group": "\u0627\u0644\u0628\u0646\u0627\u0621", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -14887,7 +14919,7 @@ ] }, { - "group": "العمليات", + "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -14896,13 +14928,13 @@ ] }, { - "group": "الإدارة", + "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "التكاملات", + "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -14954,7 +14986,7 @@ ] }, { - "group": "المشغّلات", + "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -14970,7 +15002,7 @@ ] }, { - "group": "موارد التعلّم", + "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -14978,11 +15010,11 @@ ] }, { - "tab": "API المرجع", + "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", "icon": "magnifying-glass", "groups": [ { - "group": "البدء", + "group": "\u0627\u0644\u0628\u062f\u0621", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -14994,11 +15026,11 @@ ] }, { - "tab": "أمثلة", + "tab": "\u0623\u0645\u062b\u0644\u0629", "icon": "code", "groups": [ { - "group": "أمثلة", + "group": "\u0623\u0645\u062b\u0644\u0629", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -15007,11 +15039,11 @@ ] }, { - "tab": 
"التغييرات السجلات", + "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", "icon": "clock", "groups": [ { - "group": "سجل التغييرات", + "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", "pages": [ "ar/changelog" ] diff --git a/docs/en/concepts/checkpointing.mdx b/docs/en/concepts/checkpointing.mdx new file mode 100644 index 000000000..799f674d3 --- /dev/null +++ b/docs/en/concepts/checkpointing.mdx @@ -0,0 +1,187 @@ +--- +title: Checkpointing +description: Automatically save execution state so crews, flows, and agents can resume after failures. +icon: floppy-disk +mode: "wide" +--- + + +Checkpointing is in early release. APIs may change in future versions. + + +## Overview + +Checkpointing automatically saves execution state during a run. If a crew, flow, or agent fails mid-execution, you can restore from the last checkpoint and resume without re-running completed work. + +## Quick Start + +```python +from crewai import Crew, CheckpointConfig + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=True, # uses defaults: ./.checkpoints, on task_completed +) +result = crew.kickoff() +``` + +Checkpoint files are written to `./.checkpoints/` after each completed task. 
+ +## Configuration + +Use `CheckpointConfig` for full control: + +```python +from crewai import Crew, CheckpointConfig + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./my_checkpoints", + on_events=["task_completed", "crew_kickoff_completed"], + max_checkpoints=5, + ), +) +``` + +### CheckpointConfig Fields + +| Field | Type | Default | Description | +|:------|:-----|:--------|:------------| +| `directory` | `str` | `"./.checkpoints"` | Filesystem path for checkpoint files | +| `on_events` | `list[str]` | `["task_completed"]` | Event types that trigger a checkpoint | +| `provider` | `BaseProvider` | `JsonProvider()` | Storage backend | +| `max_checkpoints` | `int \| None` | `None` | Max files to keep; oldest pruned first | + +### Inheritance and Opt-Out + +The `checkpoint` field on Crew, Flow, and Agent accepts `CheckpointConfig`, `True`, `False`, or `None`: + +| Value | Behavior | +|:------|:---------| +| `None` (default) | Inherit from parent. An agent inherits its crew's config. | +| `True` | Enable with defaults. | +| `False` | Explicit opt-out. Stops inheritance from parent. | +| `CheckpointConfig(...)` | Custom configuration. | + +```python +crew = Crew( + agents=[ + Agent(role="Researcher", ...), # inherits crew's checkpoint + Agent(role="Writer", ..., checkpoint=False), # opted out, no checkpoints + ], + tasks=[...], + checkpoint=True, +) +``` + +## Resuming from a Checkpoint + +```python +# Restore and resume +crew = Crew.from_checkpoint("./my_checkpoints/20260407T120000_abc123.json") +result = crew.kickoff() # picks up from last completed task +``` + +The restored crew skips already-completed tasks and resumes from the first incomplete one. 
+ +## Works on Crew, Flow, and Agent + +### Crew + +```python +crew = Crew( + agents=[researcher, writer], + tasks=[research_task, write_task, review_task], + checkpoint=CheckpointConfig(directory="./crew_cp"), +) +``` + +Default trigger: `task_completed` (one checkpoint per finished task). + +### Flow + +```python +from crewai.flow.flow import Flow, start, listen +from crewai import CheckpointConfig + +class MyFlow(Flow): + @start() + def step_one(self): + return "data" + + @listen(step_one) + def step_two(self, data): + return process(data) + +flow = MyFlow( + checkpoint=CheckpointConfig( + directory="./flow_cp", + on_events=["method_execution_finished"], + ), +) +result = flow.kickoff() + +# Resume +flow = MyFlow.from_checkpoint("./flow_cp/20260407T120000_abc123.json") +result = flow.kickoff() +``` + +### Agent + +```python +agent = Agent( + role="Researcher", + goal="Research topics", + backstory="Expert researcher", + checkpoint=CheckpointConfig( + directory="./agent_cp", + on_events=["lite_agent_execution_completed"], + ), +) +result = agent.kickoff(messages=[{"role": "user", "content": "Research AI trends"}]) +``` + +## Event Types + +The `on_events` field accepts any combination of event type strings. Common choices: + +| Use Case | Events | +|:---------|:-------| +| After each task (Crew) | `["task_completed"]` | +| After each flow method | `["method_execution_finished"]` | +| After agent execution | `["agent_execution_completed"]`, `["lite_agent_execution_completed"]` | +| On crew completion only | `["crew_kickoff_completed"]` | +| After every LLM call | `["llm_call_completed"]` | +| On everything | `["*"]` | + + +Using `["*"]` or high-frequency events like `llm_call_completed` will write many checkpoint files and may impact performance. Use `max_checkpoints` to limit disk usage. 
+ + +## Manual Checkpointing + +For full control, register your own event handler and call `state.checkpoint()` directly: + +```python +from crewai.events.event_bus import crewai_event_bus +from crewai.events.types.llm_events import LLMCallCompletedEvent + +# Sync handler +@crewai_event_bus.on(LLMCallCompletedEvent) +def on_llm_done(source, event, state): + path = state.checkpoint("./my_checkpoints") + print(f"Saved checkpoint: {path}") + +# Async handler +@crewai_event_bus.on(LLMCallCompletedEvent) +async def on_llm_done_async(source, event, state): + path = await state.acheckpoint("./my_checkpoints") + print(f"Saved checkpoint: {path}") +``` + +The `state` argument is the `RuntimeState` passed automatically by the event bus when your handler accepts 3 parameters. You can register handlers on any event type listed in the [Event Listeners](/en/concepts/event-listener) documentation. + +Checkpointing is best-effort: if a checkpoint write fails, the error is logged but execution continues uninterrupted. diff --git a/docs/ko/concepts/checkpointing.mdx b/docs/ko/concepts/checkpointing.mdx new file mode 100644 index 000000000..da33aa3c8 --- /dev/null +++ b/docs/ko/concepts/checkpointing.mdx @@ -0,0 +1,187 @@ +--- +title: Checkpointing +description: 실행 상태를 자동으로 저장하여 크루, 플로우, 에이전트가 실패 후 재개할 수 있습니다. +icon: floppy-disk +mode: "wide" +--- + + +체크포인팅은 초기 릴리스 단계입니다. API는 향후 버전에서 변경될 수 있습니다. + + +## 개요 + +체크포인팅은 실행 중 자동으로 실행 상태를 저장합니다. 크루, 플로우 또는 에이전트가 실행 도중 실패하면 마지막 체크포인트에서 복원하여 이미 완료된 작업을 다시 실행하지 않고 재개할 수 있습니다. + +## 빠른 시작 + +```python +from crewai import Crew, CheckpointConfig + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=True, # 기본값 사용: ./.checkpoints, task_completed 이벤트 +) +result = crew.kickoff() +``` + +각 태스크가 완료된 후 `./.checkpoints/`에 체크포인트 파일이 기록됩니다. 
+ +## 설정 + +`CheckpointConfig`를 사용하여 세부 설정을 제어합니다: + +```python +from crewai import Crew, CheckpointConfig + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./my_checkpoints", + on_events=["task_completed", "crew_kickoff_completed"], + max_checkpoints=5, + ), +) +``` + +### CheckpointConfig 필드 + +| 필드 | 타입 | 기본값 | 설명 | +|:-----|:-----|:-------|:-----| +| `directory` | `str` | `"./.checkpoints"` | 체크포인트 파일 경로 | +| `on_events` | `list[str]` | `["task_completed"]` | 체크포인트를 트리거하는 이벤트 타입 | +| `provider` | `BaseProvider` | `JsonProvider()` | 스토리지 백엔드 | +| `max_checkpoints` | `int \| None` | `None` | 보관할 최대 파일 수; 오래된 것부터 삭제 | + +### 상속 및 옵트아웃 + +Crew, Flow, Agent의 `checkpoint` 필드는 `CheckpointConfig`, `True`, `False`, `None`을 받습니다: + +| 값 | 동작 | +|:---|:-----| +| `None` (기본값) | 부모에서 상속. 에이전트는 크루의 설정을 상속합니다. | +| `True` | 기본값으로 활성화. | +| `False` | 명시적 옵트아웃. 부모 상속을 중단합니다. | +| `CheckpointConfig(...)` | 사용자 정의 설정. | + +```python +crew = Crew( + agents=[ + Agent(role="Researcher", ...), # 크루의 checkpoint 상속 + Agent(role="Writer", ..., checkpoint=False), # 옵트아웃, 체크포인트 없음 + ], + tasks=[...], + checkpoint=True, +) +``` + +## 체크포인트에서 재개 + +```python +# 복원 및 재개 +crew = Crew.from_checkpoint("./my_checkpoints/20260407T120000_abc123.json") +result = crew.kickoff() # 마지막으로 완료된 태스크부터 재개 +``` + +복원된 크루는 이미 완료된 태스크를 건너뛰고 첫 번째 미완료 태스크부터 재개합니다. + +## Crew, Flow, Agent에서 사용 가능 + +### Crew + +```python +crew = Crew( + agents=[researcher, writer], + tasks=[research_task, write_task, review_task], + checkpoint=CheckpointConfig(directory="./crew_cp"), +) +``` + +기본 트리거: `task_completed` (완료된 태스크당 하나의 체크포인트). 
+ +### Flow + +```python +from crewai.flow.flow import Flow, start, listen +from crewai import CheckpointConfig + +class MyFlow(Flow): + @start() + def step_one(self): + return "data" + + @listen(step_one) + def step_two(self, data): + return process(data) + +flow = MyFlow( + checkpoint=CheckpointConfig( + directory="./flow_cp", + on_events=["method_execution_finished"], + ), +) +result = flow.kickoff() + +# 재개 +flow = MyFlow.from_checkpoint("./flow_cp/20260407T120000_abc123.json") +result = flow.kickoff() +``` + +### Agent + +```python +agent = Agent( + role="Researcher", + goal="Research topics", + backstory="Expert researcher", + checkpoint=CheckpointConfig( + directory="./agent_cp", + on_events=["lite_agent_execution_completed"], + ), +) +result = agent.kickoff(messages=[{"role": "user", "content": "Research AI trends"}]) +``` + +## 이벤트 타입 + +`on_events` 필드는 이벤트 타입 문자열의 조합을 받습니다. 일반적인 선택: + +| 사용 사례 | 이벤트 | +|:----------|:-------| +| 각 태스크 완료 후 (Crew) | `["task_completed"]` | +| 각 플로우 메서드 완료 후 | `["method_execution_finished"]` | +| 에이전트 실행 완료 후 | `["agent_execution_completed"]`, `["lite_agent_execution_completed"]` | +| 크루 완료 시에만 | `["crew_kickoff_completed"]` | +| 모든 LLM 호출 후 | `["llm_call_completed"]` | +| 모든 이벤트 | `["*"]` | + + +`["*"]` 또는 `llm_call_completed`와 같은 고빈도 이벤트를 사용하면 많은 체크포인트 파일이 생성되어 성능에 영향을 줄 수 있습니다. `max_checkpoints`를 사용하여 디스크 사용량을 제한하세요. 
+ + +## 수동 체크포인팅 + +완전한 제어를 위해 자체 이벤트 핸들러를 등록하고 `state.checkpoint()`를 직접 호출할 수 있습니다: + +```python +from crewai.events.event_bus import crewai_event_bus +from crewai.events.types.llm_events import LLMCallCompletedEvent + +# 동기 핸들러 +@crewai_event_bus.on(LLMCallCompletedEvent) +def on_llm_done(source, event, state): + path = state.checkpoint("./my_checkpoints") + print(f"체크포인트 저장: {path}") + +# 비동기 핸들러 +@crewai_event_bus.on(LLMCallCompletedEvent) +async def on_llm_done_async(source, event, state): + path = await state.acheckpoint("./my_checkpoints") + print(f"체크포인트 저장: {path}") +``` + +`state` 인수는 핸들러가 3개의 매개변수를 받을 때 이벤트 버스가 자동으로 전달하는 `RuntimeState`입니다. [Event Listeners](/ko/concepts/event-listener) 문서에 나열된 모든 이벤트 타입에 핸들러를 등록할 수 있습니다. + +체크포인팅은 best-effort입니다: 체크포인트 기록이 실패하면 오류가 로그에 기록되지만 실행은 중단 없이 계속됩니다. diff --git a/docs/pt-BR/concepts/checkpointing.mdx b/docs/pt-BR/concepts/checkpointing.mdx new file mode 100644 index 000000000..251691243 --- /dev/null +++ b/docs/pt-BR/concepts/checkpointing.mdx @@ -0,0 +1,187 @@ +--- +title: Checkpointing +description: Salve automaticamente o estado de execucao para que crews, flows e agentes possam retomar apos falhas. +icon: floppy-disk +mode: "wide" +--- + + +O checkpointing esta em versao inicial. As APIs podem mudar em versoes futuras. + + +## Visao Geral + +O checkpointing salva automaticamente o estado de execucao durante uma execucao. Se uma crew, flow ou agente falhar no meio da execucao, voce pode restaurar a partir do ultimo checkpoint e retomar sem reexecutar o trabalho ja concluido. + +## Inicio Rapido + +```python +from crewai import Crew, CheckpointConfig + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=True, # usa padroes: ./.checkpoints, em task_completed +) +result = crew.kickoff() +``` + +Os arquivos de checkpoint sao gravados em `./.checkpoints/` apos cada tarefa concluida. 
+ +## Configuracao + +Use `CheckpointConfig` para controle total: + +```python +from crewai import Crew, CheckpointConfig + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./my_checkpoints", + on_events=["task_completed", "crew_kickoff_completed"], + max_checkpoints=5, + ), +) +``` + +### Campos do CheckpointConfig + +| Campo | Tipo | Padrao | Descricao | +|:------|:-----|:-------|:----------| +| `directory` | `str` | `"./.checkpoints"` | Caminho para os arquivos de checkpoint | +| `on_events` | `list[str]` | `["task_completed"]` | Tipos de evento que acionam um checkpoint | +| `provider` | `BaseProvider` | `JsonProvider()` | Backend de armazenamento | +| `max_checkpoints` | `int \| None` | `None` | Maximo de arquivos a manter; os mais antigos sao removidos primeiro | + +### Heranca e Desativacao + +O campo `checkpoint` em Crew, Flow e Agent aceita `CheckpointConfig`, `True`, `False` ou `None`: + +| Valor | Comportamento | +|:------|:--------------| +| `None` (padrao) | Herda do pai. Um agente herda a configuracao da crew. | +| `True` | Ativa com padroes. | +| `False` | Desativacao explicita. Interrompe a heranca do pai. | +| `CheckpointConfig(...)` | Configuracao personalizada. | + +```python +crew = Crew( + agents=[ + Agent(role="Researcher", ...), # herda checkpoint da crew + Agent(role="Writer", ..., checkpoint=False), # desativado, sem checkpoints + ], + tasks=[...], + checkpoint=True, +) +``` + +## Retomando a partir de um Checkpoint + +```python +# Restaurar e retomar +crew = Crew.from_checkpoint("./my_checkpoints/20260407T120000_abc123.json") +result = crew.kickoff() # retoma a partir da ultima tarefa concluida +``` + +A crew restaurada pula tarefas ja concluidas e retoma a partir da primeira incompleta. 
+ +## Funciona em Crew, Flow e Agent + +### Crew + +```python +crew = Crew( + agents=[researcher, writer], + tasks=[research_task, write_task, review_task], + checkpoint=CheckpointConfig(directory="./crew_cp"), +) +``` + +Gatilho padrao: `task_completed` (um checkpoint por tarefa finalizada). + +### Flow + +```python +from crewai.flow.flow import Flow, start, listen +from crewai import CheckpointConfig + +class MyFlow(Flow): + @start() + def step_one(self): + return "data" + + @listen(step_one) + def step_two(self, data): + return process(data) + +flow = MyFlow( + checkpoint=CheckpointConfig( + directory="./flow_cp", + on_events=["method_execution_finished"], + ), +) +result = flow.kickoff() + +# Retomar +flow = MyFlow.from_checkpoint("./flow_cp/20260407T120000_abc123.json") +result = flow.kickoff() +``` + +### Agent + +```python +agent = Agent( + role="Researcher", + goal="Research topics", + backstory="Expert researcher", + checkpoint=CheckpointConfig( + directory="./agent_cp", + on_events=["lite_agent_execution_completed"], + ), +) +result = agent.kickoff(messages=[{"role": "user", "content": "Research AI trends"}]) +``` + +## Tipos de Evento + +O campo `on_events` aceita qualquer combinacao de strings de tipo de evento. Escolhas comuns: + +| Caso de Uso | Eventos | +|:------------|:--------| +| Apos cada tarefa (Crew) | `["task_completed"]` | +| Apos cada metodo do flow | `["method_execution_finished"]` | +| Apos execucao do agente | `["agent_execution_completed"]`, `["lite_agent_execution_completed"]` | +| Apenas na conclusao da crew | `["crew_kickoff_completed"]` | +| Apos cada chamada LLM | `["llm_call_completed"]` | +| Em tudo | `["*"]` | + + +Usar `["*"]` ou eventos de alta frequencia como `llm_call_completed` gravara muitos arquivos de checkpoint e pode impactar o desempenho. Use `max_checkpoints` para limitar o uso de disco. 
+ + +## Checkpointing Manual + +Para controle total, registre seu proprio handler de evento e chame `state.checkpoint()` diretamente: + +```python +from crewai.events.event_bus import crewai_event_bus +from crewai.events.types.llm_events import LLMCallCompletedEvent + +# Handler sincrono +@crewai_event_bus.on(LLMCallCompletedEvent) +def on_llm_done(source, event, state): + path = state.checkpoint("./my_checkpoints") + print(f"Checkpoint salvo: {path}") + +# Handler assincrono +@crewai_event_bus.on(LLMCallCompletedEvent) +async def on_llm_done_async(source, event, state): + path = await state.acheckpoint("./my_checkpoints") + print(f"Checkpoint salvo: {path}") +``` + +O argumento `state` e o `RuntimeState` passado automaticamente pelo barramento de eventos quando seu handler aceita 3 parametros. Voce pode registrar handlers em qualquer tipo de evento listado na documentacao de [Event Listeners](/pt-BR/concepts/event-listener). + +O checkpointing e best-effort: se uma gravacao de checkpoint falhar, o erro e registrado no log, mas a execucao continua sem interrupcao. 
diff --git a/lib/crewai/src/crewai/__init__.py b/lib/crewai/src/crewai/__init__.py index 01be9fead..8a7d6dd3f 100644 --- a/lib/crewai/src/crewai/__init__.py +++ b/lib/crewai/src/crewai/__init__.py @@ -16,6 +16,7 @@ from crewai.knowledge.knowledge import Knowledge from crewai.llm import LLM from crewai.llms.base_llm import BaseLLM from crewai.process import Process +from crewai.state.checkpoint_config import CheckpointConfig # noqa: F401 from crewai.task import Task from crewai.tasks.llm_guardrail import LLMGuardrail from crewai.tasks.task_output import TaskOutput @@ -210,6 +211,7 @@ try: Agent.model_rebuild(force=True, _types_namespace=_full_namespace) except PydanticUserError: pass + except (ImportError, PydanticUserError): import logging as _logging diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py index cfa08bbc3..dbff05e4d 100644 --- a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py +++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py @@ -39,6 +39,7 @@ from crewai.memory.unified_memory import Memory from crewai.rag.embeddings.types import EmbedderConfig from crewai.security.security_config import SecurityConfig from crewai.skills.models import Skill +from crewai.state.checkpoint_config import CheckpointConfig from crewai.tools.base_tool import BaseTool, Tool from crewai.types.callback import SerializableCallable from crewai.utilities.config import process_config @@ -299,6 +300,11 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta): default_factory=SecurityConfig, description="Security configuration for the agent, including fingerprinting.", ) + checkpoint: CheckpointConfig | bool | None = Field( + default=None, + description="Automatic checkpointing configuration. 
" + "True for defaults, False to opt out, None to inherit.", + ) callbacks: list[SerializableCallable] = Field( default_factory=list, description="Callbacks to be used for the agent" ) diff --git a/lib/crewai/src/crewai/crew.py b/lib/crewai/src/crewai/crew.py index 2e7964fb1..4f9ebab5d 100644 --- a/lib/crewai/src/crewai/crew.py +++ b/lib/crewai/src/crewai/crew.py @@ -104,6 +104,7 @@ from crewai.rag.types import SearchResult from crewai.security.fingerprint import Fingerprint from crewai.security.security_config import SecurityConfig from crewai.skills.models import Skill +from crewai.state.checkpoint_config import CheckpointConfig from crewai.task import Task from crewai.tasks.conditional_task import ConditionalTask from crewai.tasks.task_output import TaskOutput @@ -340,6 +341,11 @@ class Crew(FlowTrackable, BaseModel): default_factory=SecurityConfig, description="Security configuration for the crew, including fingerprinting.", ) + checkpoint: CheckpointConfig | bool | None = Field( + default=None, + description="Automatic checkpointing configuration. 
" + "True for defaults, False to opt out, None to inherit.", + ) token_usage: UsageMetrics | None = Field( default=None, description="Metrics for the LLM usage during all tasks execution.", diff --git a/lib/crewai/src/crewai/flow/flow.py b/lib/crewai/src/crewai/flow/flow.py index d99aa05de..76a96b3f9 100644 --- a/lib/crewai/src/crewai/flow/flow.py +++ b/lib/crewai/src/crewai/flow/flow.py @@ -113,6 +113,7 @@ from crewai.flow.utils import ( ) from crewai.memory.memory_scope import MemoryScope, MemorySlice from crewai.memory.unified_memory import Memory +from crewai.state.checkpoint_config import CheckpointConfig if TYPE_CHECKING: @@ -920,6 +921,7 @@ class Flow(BaseModel, Generic[T], metaclass=FlowMeta): max_method_calls: int = Field(default=100) execution_context: ExecutionContext | None = Field(default=None) + checkpoint: CheckpointConfig | bool | None = Field(default=None) @classmethod def from_checkpoint( diff --git a/lib/crewai/src/crewai/state/__init__.py b/lib/crewai/src/crewai/state/__init__.py index e69de29bb..d8f3419c7 100644 --- a/lib/crewai/src/crewai/state/__init__.py +++ b/lib/crewai/src/crewai/state/__init__.py @@ -0,0 +1,4 @@ +from crewai.state.checkpoint_config import CheckpointConfig, CheckpointEventType + + +__all__ = ["CheckpointConfig", "CheckpointEventType"] diff --git a/lib/crewai/src/crewai/state/checkpoint_config.py b/lib/crewai/src/crewai/state/checkpoint_config.py new file mode 100644 index 000000000..4c60fd35c --- /dev/null +++ b/lib/crewai/src/crewai/state/checkpoint_config.py @@ -0,0 +1,193 @@ +"""Checkpoint configuration for automatic state persistence.""" + +from __future__ import annotations + +from typing import Literal + +from pydantic import BaseModel, Field + +from crewai.state.provider.core import BaseProvider +from crewai.state.provider.json_provider import JsonProvider + + +CheckpointEventType = Literal[ + # Task + "task_started", + "task_completed", + "task_failed", + "task_evaluation", + # Crew + "crew_kickoff_started", + 
"crew_kickoff_completed", + "crew_kickoff_failed", + "crew_train_started", + "crew_train_completed", + "crew_train_failed", + "crew_test_started", + "crew_test_completed", + "crew_test_failed", + "crew_test_result", + # Agent + "agent_execution_started", + "agent_execution_completed", + "agent_execution_error", + "lite_agent_execution_started", + "lite_agent_execution_completed", + "lite_agent_execution_error", + "agent_evaluation_started", + "agent_evaluation_completed", + "agent_evaluation_failed", + # Flow + "flow_created", + "flow_started", + "flow_finished", + "flow_paused", + "method_execution_started", + "method_execution_finished", + "method_execution_failed", + "method_execution_paused", + "human_feedback_requested", + "human_feedback_received", + "flow_input_requested", + "flow_input_received", + # LLM + "llm_call_started", + "llm_call_completed", + "llm_call_failed", + "llm_stream_chunk", + "llm_thinking_chunk", + # LLM Guardrail + "llm_guardrail_started", + "llm_guardrail_completed", + "llm_guardrail_failed", + # Tool + "tool_usage_started", + "tool_usage_finished", + "tool_usage_error", + "tool_validate_input_error", + "tool_selection_error", + "tool_execution_error", + # Memory + "memory_save_started", + "memory_save_completed", + "memory_save_failed", + "memory_query_started", + "memory_query_completed", + "memory_query_failed", + "memory_retrieval_started", + "memory_retrieval_completed", + "memory_retrieval_failed", + # Knowledge + "knowledge_search_query_started", + "knowledge_search_query_completed", + "knowledge_query_started", + "knowledge_query_completed", + "knowledge_query_failed", + "knowledge_search_query_failed", + # Reasoning + "agent_reasoning_started", + "agent_reasoning_completed", + "agent_reasoning_failed", + # MCP + "mcp_connection_started", + "mcp_connection_completed", + "mcp_connection_failed", + "mcp_tool_execution_started", + "mcp_tool_execution_completed", + "mcp_tool_execution_failed", + "mcp_config_fetch_failed", + # 
Observation + "step_observation_started", + "step_observation_completed", + "step_observation_failed", + "plan_refinement", + "plan_replan_triggered", + "goal_achieved_early", + # Skill + "skill_discovery_started", + "skill_discovery_completed", + "skill_loaded", + "skill_activated", + "skill_load_failed", + # Logging + "agent_logs_started", + "agent_logs_execution", + # A2A + "a2a_delegation_started", + "a2a_delegation_completed", + "a2a_conversation_started", + "a2a_conversation_completed", + "a2a_message_sent", + "a2a_response_received", + "a2a_polling_started", + "a2a_polling_status", + "a2a_push_notification_registered", + "a2a_push_notification_received", + "a2a_push_notification_sent", + "a2a_push_notification_timeout", + "a2a_streaming_started", + "a2a_streaming_chunk", + "a2a_agent_card_fetched", + "a2a_authentication_failed", + "a2a_artifact_received", + "a2a_connection_error", + "a2a_server_task_started", + "a2a_server_task_completed", + "a2a_server_task_canceled", + "a2a_server_task_failed", + "a2a_parallel_delegation_started", + "a2a_parallel_delegation_completed", + "a2a_transport_negotiated", + "a2a_content_type_negotiated", + "a2a_context_created", + "a2a_context_expired", + "a2a_context_idle", + "a2a_context_completed", + "a2a_context_pruned", + # System + "SIGTERM", + "SIGINT", + "SIGHUP", + "SIGTSTP", + "SIGCONT", + # Env + "cc_env", + "codex_env", + "cursor_env", + "default_env", +] + + +class CheckpointConfig(BaseModel): + """Configuration for automatic checkpointing. + + When set on a Crew, Flow, or Agent, checkpoints are written + automatically whenever the specified event(s) fire. + """ + + directory: str = Field( + default="./.checkpoints", + description="Filesystem path where checkpoint JSON files are written.", + ) + on_events: list[CheckpointEventType | Literal["*"]] = Field( + default=["task_completed"], + description="Event types that trigger a checkpoint write. 
" + 'Use ["*"] to checkpoint on every event.', + ) + provider: BaseProvider = Field( + default_factory=JsonProvider, + description="Storage backend. Defaults to JsonProvider.", + ) + max_checkpoints: int | None = Field( + default=None, + description="Maximum checkpoint files to keep. Oldest are pruned first. " + "None means keep all.", + ) + + @property + def trigger_all(self) -> bool: + return "*" in self.on_events + + @property + def trigger_events(self) -> set[str]: + return set(self.on_events) diff --git a/lib/crewai/src/crewai/state/checkpoint_listener.py b/lib/crewai/src/crewai/state/checkpoint_listener.py new file mode 100644 index 000000000..cf5b39b2b --- /dev/null +++ b/lib/crewai/src/crewai/state/checkpoint_listener.py @@ -0,0 +1,176 @@ +"""Event listener that writes checkpoints automatically. + +Handlers are registered lazily — only when the first ``CheckpointConfig`` +is resolved (i.e. an entity actually has checkpointing enabled). This +avoids per-event overhead when no entity uses checkpointing. 
+""" + +from __future__ import annotations + +import glob +import logging +import os +import threading +from typing import Any + +from crewai.agents.agent_builder.base_agent import BaseAgent +from crewai.crew import Crew +from crewai.events.base_events import BaseEvent +from crewai.events.event_bus import CrewAIEventsBus, crewai_event_bus +from crewai.flow.flow import Flow +from crewai.state.checkpoint_config import CheckpointConfig +from crewai.state.runtime import RuntimeState, _prepare_entities +from crewai.task import Task + + +logger = logging.getLogger(__name__) + +_handlers_registered = False +_register_lock = threading.Lock() + +_SENTINEL = object() + + +def _ensure_handlers_registered() -> None: + """Register checkpoint handlers on the event bus once, lazily.""" + global _handlers_registered + if _handlers_registered: + return + with _register_lock: + if _handlers_registered: + return + _register_all_handlers(crewai_event_bus) + _handlers_registered = True + + +def _resolve(value: CheckpointConfig | bool | None) -> CheckpointConfig | None | object: + """Coerce a checkpoint field value. + + Returns: + CheckpointConfig — use this config. + _SENTINEL — explicit opt-out (``False``), stop walking parents. + None — not configured, keep walking parents. + """ + if isinstance(value, CheckpointConfig): + _ensure_handlers_registered() + return value + if value is True: + _ensure_handlers_registered() + return CheckpointConfig() + if value is False: + return _SENTINEL + return None # None = inherit + + +def _find_checkpoint(source: Any) -> CheckpointConfig | None: + """Find the CheckpointConfig for an event source. + + Walks known relationships: Task -> Agent -> Crew. Flow and Agent + carry their own checkpoint field directly. + + A ``None`` value means "not configured, inherit from parent". + A ``False`` value means "opt out" and stops the walk. 
+ """ + if isinstance(source, Flow): + result = _resolve(source.checkpoint) + return result if isinstance(result, CheckpointConfig) else None + if isinstance(source, Crew): + result = _resolve(source.checkpoint) + return result if isinstance(result, CheckpointConfig) else None + if isinstance(source, BaseAgent): + result = _resolve(source.checkpoint) + if isinstance(result, CheckpointConfig): + return result + if result is _SENTINEL: + return None + crew = source.crew + if isinstance(crew, Crew): + result = _resolve(crew.checkpoint) + return result if isinstance(result, CheckpointConfig) else None + return None + if isinstance(source, Task): + agent = source.agent + if isinstance(agent, BaseAgent): + result = _resolve(agent.checkpoint) + if isinstance(result, CheckpointConfig): + return result + if result is _SENTINEL: + return None + crew = agent.crew + if isinstance(crew, Crew): + result = _resolve(crew.checkpoint) + return result if isinstance(result, CheckpointConfig) else None + return None + return None + + +def _do_checkpoint(state: RuntimeState, cfg: CheckpointConfig) -> None: + """Write a checkpoint synchronously and optionally prune old files.""" + _prepare_entities(state.root) + data = state.model_dump_json() + cfg.provider.checkpoint(data, cfg.directory) + + if cfg.max_checkpoints is not None: + _prune(cfg.directory, cfg.max_checkpoints) + + +def _safe_remove(path: str) -> None: + try: + os.remove(path) + except OSError: + logger.debug("Failed to remove checkpoint file %s", path, exc_info=True) + + +def _prune(directory: str, max_keep: int) -> None: + """Remove oldest checkpoint files beyond *max_keep*.""" + pattern = os.path.join(directory, "*.json") + files = sorted(glob.glob(pattern), key=os.path.getmtime) + to_remove = files if max_keep == 0 else files[:-max_keep] + for path in to_remove: + _safe_remove(path) + + +def _should_checkpoint(source: Any, event: BaseEvent) -> CheckpointConfig | None: + """Return the CheckpointConfig if this event should 
trigger a checkpoint.""" + cfg = _find_checkpoint(source) + if cfg is None: + return None + if not cfg.trigger_all and event.type not in cfg.trigger_events: + return None + return cfg + + +def _on_any_event(source: Any, event: BaseEvent, state: Any) -> None: + """Sync handler registered on every event class.""" + cfg = _should_checkpoint(source, event) + if cfg is None: + return + try: + _do_checkpoint(state, cfg) + except Exception: + logger.warning("Auto-checkpoint failed for event %s", event.type, exc_info=True) + + +def _register_all_handlers(event_bus: CrewAIEventsBus) -> None: + """Register the checkpoint handler on all known event classes. + + Only the sync handler is registered. The event bus runs sync handlers + in a ``ThreadPoolExecutor``, so blocking I/O is safe and we avoid + writing duplicate checkpoints from both sync and async dispatch. + """ + seen: set[type] = set() + + def _collect(cls: type[BaseEvent]) -> None: + for sub in cls.__subclasses__(): + if sub not in seen: + seen.add(sub) + type_field = sub.model_fields.get("type") + if ( + type_field + and type_field.default + and type_field.default != "base_event" + ): + event_bus.register_handler(sub, _on_any_event) + _collect(sub) + + _collect(BaseEvent) diff --git a/lib/crewai/tests/test_checkpoint.py b/lib/crewai/tests/test_checkpoint.py new file mode 100644 index 000000000..3533dac85 --- /dev/null +++ b/lib/crewai/tests/test_checkpoint.py @@ -0,0 +1,169 @@ +"""Tests for CheckpointConfig, checkpoint listener, and pruning.""" + +from __future__ import annotations + +import os +import tempfile +import time +from typing import Any +from unittest.mock import MagicMock, patch + +import pytest + +from crewai.agent.core import Agent +from crewai.agents.agent_builder.base_agent import BaseAgent +from crewai.crew import Crew +from crewai.flow.flow import Flow, start +from crewai.state.checkpoint_config import CheckpointConfig +from crewai.state.checkpoint_listener import ( + _find_checkpoint, + _prune, + 
_resolve, + _SENTINEL, +) +from crewai.task import Task + + +# ---------- _resolve ---------- + + +class TestResolve: + def test_none_returns_none(self) -> None: + assert _resolve(None) is None + + def test_false_returns_sentinel(self) -> None: + assert _resolve(False) is _SENTINEL + + def test_true_returns_config(self) -> None: + result = _resolve(True) + assert isinstance(result, CheckpointConfig) + assert result.directory == "./.checkpoints" + + def test_config_returns_config(self) -> None: + cfg = CheckpointConfig(directory="/tmp/cp") + assert _resolve(cfg) is cfg + + +# ---------- _find_checkpoint inheritance ---------- + + +class TestFindCheckpoint: + def _make_agent(self, checkpoint: Any = None) -> Agent: + return Agent(role="r", goal="g", backstory="b", checkpoint=checkpoint) + + def _make_crew( + self, agents: list[Agent], checkpoint: Any = None + ) -> Crew: + crew = Crew(agents=agents, tasks=[], checkpoint=checkpoint) + for a in agents: + a.crew = crew + return crew + + def test_crew_true(self) -> None: + a = self._make_agent() + self._make_crew([a], checkpoint=True) + cfg = _find_checkpoint(a) + assert isinstance(cfg, CheckpointConfig) + + def test_crew_true_agent_false_opts_out(self) -> None: + a = self._make_agent(checkpoint=False) + self._make_crew([a], checkpoint=True) + assert _find_checkpoint(a) is None + + def test_crew_none_agent_none(self) -> None: + a = self._make_agent() + self._make_crew([a]) + assert _find_checkpoint(a) is None + + def test_agent_config_overrides_crew(self) -> None: + a = self._make_agent( + checkpoint=CheckpointConfig(directory="/agent_cp") + ) + self._make_crew([a], checkpoint=True) + cfg = _find_checkpoint(a) + assert isinstance(cfg, CheckpointConfig) + assert cfg.directory == "/agent_cp" + + def test_task_inherits_from_crew(self) -> None: + a = self._make_agent() + self._make_crew([a], checkpoint=True) + task = Task(description="d", expected_output="e", agent=a) + cfg = _find_checkpoint(task) + assert isinstance(cfg, 
CheckpointConfig) + + def test_task_agent_false_blocks(self) -> None: + a = self._make_agent(checkpoint=False) + self._make_crew([a], checkpoint=True) + task = Task(description="d", expected_output="e", agent=a) + assert _find_checkpoint(task) is None + + def test_flow_direct(self) -> None: + flow = Flow(checkpoint=True) + cfg = _find_checkpoint(flow) + assert isinstance(cfg, CheckpointConfig) + + def test_flow_none(self) -> None: + flow = Flow() + assert _find_checkpoint(flow) is None + + def test_unknown_source(self) -> None: + assert _find_checkpoint("random") is None + + +# ---------- _prune ---------- + + +class TestPrune: + def test_prune_keeps_newest(self) -> None: + with tempfile.TemporaryDirectory() as d: + for i in range(5): + path = os.path.join(d, f"cp_{i}.json") + with open(path, "w") as f: + f.write("{}") + # Ensure distinct mtime + time.sleep(0.01) + + _prune(d, max_keep=2) + remaining = os.listdir(d) + assert len(remaining) == 2 + assert "cp_3.json" in remaining + assert "cp_4.json" in remaining + + def test_prune_zero_removes_all(self) -> None: + with tempfile.TemporaryDirectory() as d: + for i in range(3): + with open(os.path.join(d, f"cp_{i}.json"), "w") as f: + f.write("{}") + + _prune(d, max_keep=0) + assert os.listdir(d) == [] + + def test_prune_more_than_existing(self) -> None: + with tempfile.TemporaryDirectory() as d: + with open(os.path.join(d, "cp.json"), "w") as f: + f.write("{}") + + _prune(d, max_keep=10) + assert len(os.listdir(d)) == 1 + + +# ---------- CheckpointConfig ---------- + + +class TestCheckpointConfig: + def test_defaults(self) -> None: + cfg = CheckpointConfig() + assert cfg.directory == "./.checkpoints" + assert cfg.on_events == ["task_completed"] + assert cfg.max_checkpoints is None + assert not cfg.trigger_all + + def test_trigger_all(self) -> None: + cfg = CheckpointConfig(on_events=["*"]) + assert cfg.trigger_all + + def test_trigger_events(self) -> None: + cfg = CheckpointConfig( + on_events=["task_completed", 
"crew_kickoff_completed"] + ) + assert cfg.trigger_events == {"task_completed", "crew_kickoff_completed"} From 6b6e191532fafba24aeb7f8fd69a3aae9d699ae7 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Tue, 7 Apr 2026 05:54:05 +0800 Subject: [PATCH 03/21] feat: add SqliteProvider for checkpoint storage --- lib/crewai/src/crewai/state/__init__.py | 3 +- .../crewai/state/provider/sqlite_provider.py | 138 ++++++++++++++++++ 2 files changed, 140 insertions(+), 1 deletion(-) create mode 100644 lib/crewai/src/crewai/state/provider/sqlite_provider.py diff --git a/lib/crewai/src/crewai/state/__init__.py b/lib/crewai/src/crewai/state/__init__.py index d8f3419c7..c08d19dcd 100644 --- a/lib/crewai/src/crewai/state/__init__.py +++ b/lib/crewai/src/crewai/state/__init__.py @@ -1,4 +1,5 @@ from crewai.state.checkpoint_config import CheckpointConfig, CheckpointEventType +from crewai.state.provider.sqlite_provider import SqliteProvider -__all__ = ["CheckpointConfig", "CheckpointEventType"] +__all__ = ["CheckpointConfig", "CheckpointEventType", "SqliteProvider"] diff --git a/lib/crewai/src/crewai/state/provider/sqlite_provider.py b/lib/crewai/src/crewai/state/provider/sqlite_provider.py new file mode 100644 index 000000000..51f7096d2 --- /dev/null +++ b/lib/crewai/src/crewai/state/provider/sqlite_provider.py @@ -0,0 +1,138 @@ +"""SQLite state provider for checkpointing.""" + +from __future__ import annotations + +from datetime import datetime, timezone +from pathlib import Path +import sqlite3 +import uuid + +import aiosqlite + +from crewai.state.provider.core import BaseProvider + + +_CREATE_TABLE = """ +CREATE TABLE IF NOT EXISTS checkpoints ( + id TEXT PRIMARY KEY, + created_at TEXT NOT NULL, + data TEXT NOT NULL +) +""" + +_INSERT = "INSERT INTO checkpoints (id, created_at, data) VALUES (?, ?, ?)" +_SELECT = "SELECT data FROM checkpoints WHERE id = ?" +_PRUNE = """ +DELETE FROM checkpoints WHERE rowid NOT IN ( + SELECT rowid FROM checkpoints ORDER BY rowid DESC LIMIT ? 
+) +""" + + +def _make_id() -> tuple[str, str]: + """Generate a checkpoint ID and ISO timestamp. + + Returns: + A tuple of (checkpoint_id, timestamp). + """ + ts = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%S") + checkpoint_id = f"{ts}_{uuid.uuid4().hex[:8]}" + return checkpoint_id, ts + + +class SqliteProvider(BaseProvider): + """Persists runtime state checkpoints in a SQLite database. + + The ``directory`` argument to ``checkpoint`` / ``acheckpoint`` is + used as the database path (e.g. ``"./.checkpoints.db"``). + + Args: + max_checkpoints: Maximum number of checkpoints to retain. + Oldest rows are pruned after each write. None keeps all. + """ + + def __init__(self, max_checkpoints: int | None = None) -> None: + self.max_checkpoints = max_checkpoints + + def checkpoint(self, data: str, directory: str) -> str: + """Write a checkpoint to the SQLite database. + + Args: + data: The serialized JSON string to persist. + directory: Path to the SQLite database file. + + Returns: + A location string in the format ``"db_path#checkpoint_id"``. + """ + checkpoint_id, ts = _make_id() + Path(directory).parent.mkdir(parents=True, exist_ok=True) + with sqlite3.connect(directory) as conn: + conn.execute("PRAGMA journal_mode=WAL") + conn.execute(_CREATE_TABLE) + conn.execute(_INSERT, (checkpoint_id, ts, data)) + if self.max_checkpoints is not None: + conn.execute(_PRUNE, (self.max_checkpoints,)) + conn.commit() + return f"{directory}#{checkpoint_id}" + + async def acheckpoint(self, data: str, directory: str) -> str: + """Write a checkpoint to the SQLite database asynchronously. + + Args: + data: The serialized JSON string to persist. + directory: Path to the SQLite database file. + + Returns: + A location string in the format ``"db_path#checkpoint_id"``. 
+ """ + checkpoint_id, ts = _make_id() + Path(directory).parent.mkdir(parents=True, exist_ok=True) + async with aiosqlite.connect(directory) as db: + await db.execute("PRAGMA journal_mode=WAL") + await db.execute(_CREATE_TABLE) + await db.execute(_INSERT, (checkpoint_id, ts, data)) + if self.max_checkpoints is not None: + await db.execute(_PRUNE, (self.max_checkpoints,)) + await db.commit() + return f"{directory}#{checkpoint_id}" + + def from_checkpoint(self, location: str) -> str: + """Read a checkpoint from the SQLite database. + + Args: + location: A location string returned by ``checkpoint()``. + + Returns: + The raw JSON string. + + Raises: + ValueError: If the checkpoint ID is not found. + """ + db_path, checkpoint_id = location.rsplit("#", 1) + with sqlite3.connect(db_path) as conn: + row = conn.execute(_SELECT, (checkpoint_id,)).fetchone() + if row is None: + raise ValueError(f"Checkpoint not found: {checkpoint_id}") + result: str = row[0] + return result + + async def afrom_checkpoint(self, location: str) -> str: + """Read a checkpoint from the SQLite database asynchronously. + + Args: + location: A location string returned by ``acheckpoint()``. + + Returns: + The raw JSON string. + + Raises: + ValueError: If the checkpoint ID is not found. 
+ """ + db_path, checkpoint_id = location.rsplit("#", 1) + async with aiosqlite.connect(db_path) as db: + cursor = await db.execute(_SELECT, (checkpoint_id,)) + row = await cursor.fetchone() + if row is None: + raise ValueError(f"Checkpoint not found: {checkpoint_id}") + result: str = row[0] + return result From f98dde6c62b660349e8eb160269db51799f2dae5 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Tue, 7 Apr 2026 06:04:29 +0800 Subject: [PATCH 04/21] docs: add storage providers section, export JsonProvider --- docs/ar/concepts/checkpointing.mdx | 45 +++++++++++++++++++++++ docs/en/concepts/checkpointing.mdx | 49 +++++++++++++++++++++++++ docs/ko/concepts/checkpointing.mdx | 45 +++++++++++++++++++++++ docs/pt-BR/concepts/checkpointing.mdx | 45 +++++++++++++++++++++++ lib/crewai/src/crewai/state/__init__.py | 8 +++- 5 files changed, 191 insertions(+), 1 deletion(-) diff --git a/docs/ar/concepts/checkpointing.mdx b/docs/ar/concepts/checkpointing.mdx index 442a98bea..4fa3665dd 100644 --- a/docs/ar/concepts/checkpointing.mdx +++ b/docs/ar/concepts/checkpointing.mdx @@ -144,6 +144,51 @@ agent = Agent( result = agent.kickoff(messages=[{"role": "user", "content": "Research AI trends"}]) ``` +## مزودات التخزين + +يتضمن CrewAI مزودي تخزين لنقاط الحفظ. + +### JsonProvider (افتراضي) + +يكتب كل نقطة حفظ كملف JSON منفصل. + +```python +from crewai import Crew, CheckpointConfig +from crewai.state import JsonProvider + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./my_checkpoints", + provider=JsonProvider(), + max_checkpoints=5, + ), +) +``` + +### SqliteProvider + +يخزن جميع نقاط الحفظ في ملف قاعدة بيانات SQLite واحد. 
+ +```python +from crewai import Crew, CheckpointConfig +from crewai.state import SqliteProvider + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./.checkpoints.db", + provider=SqliteProvider(max_checkpoints=50), + ), +) +``` + + +عند استخدام `SqliteProvider`، حقل `directory` هو مسار ملف قاعدة البيانات، وليس مجلدا. + + ## انواع الاحداث يقبل حقل `on_events` اي مجموعة من سلاسل انواع الاحداث. الخيارات الشائعة: diff --git a/docs/en/concepts/checkpointing.mdx b/docs/en/concepts/checkpointing.mdx index 799f674d3..dccdf1b1a 100644 --- a/docs/en/concepts/checkpointing.mdx +++ b/docs/en/concepts/checkpointing.mdx @@ -144,6 +144,55 @@ agent = Agent( result = agent.kickoff(messages=[{"role": "user", "content": "Research AI trends"}]) ``` +## Storage Providers + +CrewAI ships with two checkpoint storage providers. + +### JsonProvider (default) + +Writes each checkpoint as a separate JSON file. Simple, human-readable, easy to inspect. + +```python +from crewai import Crew, CheckpointConfig +from crewai.state import JsonProvider + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./my_checkpoints", + provider=JsonProvider(), # this is the default + max_checkpoints=5, # prunes oldest files + ), +) +``` + +Files are named `_.json` inside the directory. + +### SqliteProvider + +Stores all checkpoints in a single SQLite database file. Better for high-frequency checkpointing and avoids many small files. + +```python +from crewai import Crew, CheckpointConfig +from crewai.state import SqliteProvider + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./.checkpoints.db", + provider=SqliteProvider(max_checkpoints=50), + ), +) +``` + +`SqliteProvider` accepts its own `max_checkpoints` parameter that prunes old rows via SQL. WAL journal mode is enabled for concurrent read access. 
+ + +When using `SqliteProvider`, the `directory` field is the database file path, not a directory. The `max_checkpoints` on `CheckpointConfig` controls filesystem pruning (for `JsonProvider`), while `SqliteProvider.max_checkpoints` controls row pruning in the database. + + ## Event Types The `on_events` field accepts any combination of event type strings. Common choices: diff --git a/docs/ko/concepts/checkpointing.mdx b/docs/ko/concepts/checkpointing.mdx index da33aa3c8..a08933faa 100644 --- a/docs/ko/concepts/checkpointing.mdx +++ b/docs/ko/concepts/checkpointing.mdx @@ -144,6 +144,51 @@ agent = Agent( result = agent.kickoff(messages=[{"role": "user", "content": "Research AI trends"}]) ``` +## 스토리지 프로바이더 + +CrewAI는 두 가지 체크포인트 스토리지 프로바이더를 제공합니다. + +### JsonProvider (기본값) + +각 체크포인트를 별도의 JSON 파일로 저장합니다. + +```python +from crewai import Crew, CheckpointConfig +from crewai.state import JsonProvider + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./my_checkpoints", + provider=JsonProvider(), + max_checkpoints=5, + ), +) +``` + +### SqliteProvider + +모든 체크포인트를 단일 SQLite 데이터베이스 파일에 저장합니다. + +```python +from crewai import Crew, CheckpointConfig +from crewai.state import SqliteProvider + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./.checkpoints.db", + provider=SqliteProvider(max_checkpoints=50), + ), +) +``` + + +`SqliteProvider`를 사용할 때 `directory` 필드는 디렉토리가 아닌 데이터베이스 파일 경로입니다. + + ## 이벤트 타입 `on_events` 필드는 이벤트 타입 문자열의 조합을 받습니다. 일반적인 선택: diff --git a/docs/pt-BR/concepts/checkpointing.mdx b/docs/pt-BR/concepts/checkpointing.mdx index 251691243..1ef7aedf3 100644 --- a/docs/pt-BR/concepts/checkpointing.mdx +++ b/docs/pt-BR/concepts/checkpointing.mdx @@ -144,6 +144,51 @@ agent = Agent( result = agent.kickoff(messages=[{"role": "user", "content": "Research AI trends"}]) ``` +## Provedores de Armazenamento + +O CrewAI inclui dois provedores de armazenamento para checkpoints. 
+ +### JsonProvider (padrao) + +Grava cada checkpoint como um arquivo JSON separado. + +```python +from crewai import Crew, CheckpointConfig +from crewai.state import JsonProvider + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./my_checkpoints", + provider=JsonProvider(), + max_checkpoints=5, + ), +) +``` + +### SqliteProvider + +Armazena todos os checkpoints em um unico arquivo SQLite. + +```python +from crewai import Crew, CheckpointConfig +from crewai.state import SqliteProvider + +crew = Crew( + agents=[...], + tasks=[...], + checkpoint=CheckpointConfig( + directory="./.checkpoints.db", + provider=SqliteProvider(max_checkpoints=50), + ), +) +``` + + +Ao usar `SqliteProvider`, o campo `directory` e o caminho do arquivo de banco de dados, nao um diretorio. + + ## Tipos de Evento O campo `on_events` aceita qualquer combinacao de strings de tipo de evento. Escolhas comuns: diff --git a/lib/crewai/src/crewai/state/__init__.py b/lib/crewai/src/crewai/state/__init__.py index c08d19dcd..e97921ee0 100644 --- a/lib/crewai/src/crewai/state/__init__.py +++ b/lib/crewai/src/crewai/state/__init__.py @@ -1,5 +1,11 @@ from crewai.state.checkpoint_config import CheckpointConfig, CheckpointEventType +from crewai.state.provider.json_provider import JsonProvider from crewai.state.provider.sqlite_provider import SqliteProvider -__all__ = ["CheckpointConfig", "CheckpointEventType", "SqliteProvider"] +__all__ = [ + "CheckpointConfig", + "CheckpointEventType", + "JsonProvider", + "SqliteProvider", +] From 0c307f1621baeed3df8d23c09a02c8b8f935685e Mon Sep 17 00:00:00 2001 From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com> Date: Mon, 6 Apr 2026 15:04:54 -0700 Subject: [PATCH 05/21] docs: update quickstart and installation guides for improved clarity (#5301) * docs: update quickstart and installation guides for improved clarity - Revised the quickstart guide to emphasize creating a Flow and running a single-agent crew that 
generates a report. - Updated the installation documentation to reflect changes in the quickstart process and enhance user understanding. * translations --- docs/ar/enterprise/guides/deploy-to-amp.mdx | 4 +- docs/ar/installation.mdx | 4 +- docs/ar/introduction.mdx | 4 +- docs/ar/quickstart.mdx | 442 +++++++----------- docs/en/enterprise/guides/deploy-to-amp.mdx | 4 +- docs/en/installation.mdx | 5 +- docs/en/introduction.mdx | 2 +- docs/en/quickstart.mdx | 438 +++++++---------- docs/ko/enterprise/guides/deploy-to-amp.mdx | 4 +- docs/ko/installation.mdx | 5 +- docs/ko/introduction.mdx | 4 +- docs/ko/quickstart.mdx | 441 +++++++---------- .../pt-BR/enterprise/guides/deploy-to-amp.mdx | 4 +- docs/pt-BR/installation.mdx | 5 +- docs/pt-BR/introduction.mdx | 2 +- docs/pt-BR/quickstart.mdx | 436 +++++++---------- 16 files changed, 686 insertions(+), 1118 deletions(-) diff --git a/docs/ar/enterprise/guides/deploy-to-amp.mdx b/docs/ar/enterprise/guides/deploy-to-amp.mdx index a7d7a137b..befc894d7 100644 --- a/docs/ar/enterprise/guides/deploy-to-amp.mdx +++ b/docs/ar/enterprise/guides/deploy-to-amp.mdx @@ -106,7 +106,7 @@ mode: "wide" ``` - يستغرق النشر الأول عادة 10-15 دقيقة لبناء صور الحاويات. عمليات النشر اللاحقة أسرع بكثير. + يستغرق النشر الأول عادة حوالي دقيقة واحدة. @@ -188,7 +188,7 @@ crewai deploy remove 1. انقر على زر "Deploy" لبدء عملية النشر 2. يمكنك مراقبة التقدم عبر شريط التقدم - 3. يستغرق النشر الأول عادة حوالي 10-15 دقيقة؛ عمليات النشر اللاحقة ستكون أسرع + 3. يستغرق النشر الأول عادة حوالي دقيقة واحدة ![تقدم النشر](/images/enterprise/deploy-progress.png) diff --git a/docs/ar/installation.mdx b/docs/ar/installation.mdx index 3e15010c2..3a902fae0 100644 --- a/docs/ar/installation.mdx +++ b/docs/ar/installation.mdx @@ -204,8 +204,8 @@ python3 --version ## الخطوات التالية - - اتبع دليل البداية السريعة لإنشاء أول Agent في CrewAI والحصول على تجربة عملية. + + اتبع البداية السريعة لإنشاء Flow وتشغيل طاقم بوكيل واحد وإنتاج تقرير. 
- اتبع دليل البداية السريعة لإنشاء أول Agent في CrewAI والحصول على تجربة عملية. + أنشئ Flow وشغّل طاقمًا بوكيل واحد وأنشئ تقريرًا من البداية للنهاية. -## ابنِ أول وكيل CrewAI +في هذا الدليل ستُنشئ **Flow** يحدد موضوع بحث، ويشغّل **طاقمًا بوكيل واحد** (باحث يستخدم البحث على الويب)، وينتهي بتقرير **Markdown** على القرص. يُعد Flow الطريقة الموصى بها لتنظيم التطبيقات الإنتاجية: يمتلك **الحالة** و**ترتيب التنفيذ**، بينما **الوكلاء** ينفّذون العمل داخل خطوة الطاقم. -لننشئ طاقماً بسيطاً يساعدنا في `البحث` و`إعداد التقارير` عن `أحدث تطورات الذكاء الاصطناعي` لموضوع أو مجال معين. +إذا لم تُكمل تثبيت CrewAI بعد، اتبع [دليل التثبيت](/ar/installation) أولًا. -قبل المتابعة، تأكد من إنهاء تثبيت CrewAI. -إذا لم تكن قد ثبّتها بعد، يمكنك القيام بذلك باتباع [دليل التثبيت](/ar/installation). +## المتطلبات الأساسية -اتبع الخطوات أدناه للبدء! +- بيئة Python وواجهة سطر أوامر CrewAI (راجع [التثبيت](/ar/installation)) +- نموذج لغوي مهيأ بالمفاتيح الصحيحة — راجع [LLMs](/ar/concepts/llms#setting-up-your-llm) +- مفتاح API من [Serper.dev](https://serper.dev/) (`SERPER_API_KEY`) للبحث على الويب في هذا الدرس + +## ابنِ أول Flow لك - - أنشئ مشروع طاقم جديد عبر تشغيل الأمر التالي في الطرفية. - سينشئ هذا مجلداً جديداً باسم `latest-ai-development` مع البنية الأساسية لطاقمك. + + من الطرفية، أنشئ مشروع Flow (اسم المجلد يستخدم شرطة سفلية، مثل `latest_ai_flow`): + ```shell Terminal - crewai create crew latest-ai-development + crewai create flow latest-ai-flow + cd latest_ai_flow ``` + + يُنشئ ذلك تطبيق Flow ضمن `src/latest_ai_flow/`، بما في ذلك طاقمًا أوليًا في `crews/content_crew/` ستستبدله بطاقم بحث **بوكيل واحد** في الخطوات التالية. - - - ```shell Terminal - cd latest_ai_development - ``` - - - - - يمكنك أيضاً تعديل الوكلاء حسب الحاجة ليناسبوا حالة الاستخدام الخاصة بك أو نسخ ولصق كما هو في مشروعك. - أي متغير مُستكمل في ملفات `agents.yaml` و`tasks.yaml` مثل `{topic}` سيُستبدل بقيمة المتغير في ملف `main.py`. - + + + استبدل محتوى `src/latest_ai_flow/crews/content_crew/config/agents.yaml` بباحث واحد. 
تُملأ المتغيرات مثل `{topic}` من `crew.kickoff(inputs=...)`. + ```yaml agents.yaml - # src/latest_ai_development/config/agents.yaml + # src/latest_ai_flow/crews/content_crew/config/agents.yaml researcher: role: > - {topic} Senior Data Researcher + باحث بيانات أول في {topic} goal: > - Uncover cutting-edge developments in {topic} + اكتشاف أحدث التطورات في {topic} backstory: > - You're a seasoned researcher with a knack for uncovering the latest - developments in {topic}. Known for your ability to find the most relevant - information and present it in a clear and concise manner. - - reporting_analyst: - role: > - {topic} Reporting Analyst - goal: > - Create detailed reports based on {topic} data analysis and research findings - backstory: > - You're a meticulous analyst with a keen eye for detail. You're known for - your ability to turn complex data into clear and concise reports, making - it easy for others to understand and act on the information you provide. + أنت باحث مخضرم تكشف أحدث المستجدات في {topic}. + تجد المعلومات الأكثر صلة وتعرضها بوضوح. ``` - + + ```yaml tasks.yaml - # src/latest_ai_development/config/tasks.yaml + # src/latest_ai_flow/crews/content_crew/config/tasks.yaml research_task: description: > - Conduct a thorough research about {topic} - Make sure you find any interesting and relevant information given - the current year is 2025. + أجرِ بحثًا معمقًا عن {topic}. استخدم البحث على الويب للعثور على معلومات + حديثة وموثوقة. السنة الحالية 2026. expected_output: > - A list with 10 bullet points of the most relevant information about {topic} + تقرير بصيغة Markdown بأقسام واضحة: الاتجاهات الرئيسية، أدوات أو شركات بارزة، + والآثار. بين 800 و1200 كلمة تقريبًا. دون إحاطة المستند بأكمله بكتل كود. agent: researcher - - reporting_task: - description: > - Review the context you got and expand each topic into a full section for a report. - Make sure the report is detailed and contains any and all relevant information. 
- expected_output: > - A fully fledge reports with the mains topics, each with a full section of information. - Formatted as markdown without '```' - agent: reporting_analyst - output_file: report.md + output_file: output/report.md ``` - - ```python crew.py - # src/latest_ai_development/crew.py - from crewai import Agent, Crew, Process, Task - from crewai.project import CrewBase, agent, crew, task - from crewai_tools import SerperDevTool - from crewai.agents.agent_builder.base_agent import BaseAgent + + + اجعل الطاقم المُولَّد يشير إلى YAML وأرفق `SerperDevTool` بالباحث. + + ```python content_crew.py + # src/latest_ai_flow/crews/content_crew/content_crew.py from typing import List + from crewai import Agent, Crew, Process, Task + from crewai.agents.agent_builder.base_agent import BaseAgent + from crewai.project import CrewBase, agent, crew, task + from crewai_tools import SerperDevTool + + @CrewBase - class LatestAiDevelopmentCrew(): - """LatestAiDevelopment crew""" + class ResearchCrew: + """طاقم بحث بوكيل واحد داخل Flow.""" agents: List[BaseAgent] tasks: List[Task] + agents_config = "config/agents.yaml" + tasks_config = "config/tasks.yaml" + @agent def researcher(self) -> Agent: return Agent( - config=self.agents_config['researcher'], # type: ignore[index] + config=self.agents_config["researcher"], # type: ignore[index] verbose=True, - tools=[SerperDevTool()] - ) - - @agent - def reporting_analyst(self) -> Agent: - return Agent( - config=self.agents_config['reporting_analyst'], # type: ignore[index] - verbose=True + tools=[SerperDevTool()], ) @task def research_task(self) -> Task: return Task( - config=self.tasks_config['research_task'], # type: ignore[index] - ) - - @task - def reporting_task(self) -> Task: - return Task( - config=self.tasks_config['reporting_task'], # type: ignore[index] - output_file='output/report.md' # This is the file that will be contain the final report. 
+ config=self.tasks_config["research_task"], # type: ignore[index] ) @crew def crew(self) -> Crew: - """Creates the LatestAiDevelopment crew""" return Crew( - agents=self.agents, # Automatically created by the @agent decorator - tasks=self.tasks, # Automatically created by the @task decorator + agents=self.agents, + tasks=self.tasks, process=Process.sequential, verbose=True, ) ``` - - ```python crew.py - # src/latest_ai_development/crew.py - from crewai import Agent, Crew, Process, Task - from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff - from crewai_tools import SerperDevTool - @CrewBase - class LatestAiDevelopmentCrew(): - """LatestAiDevelopment crew""" + + اربط الطاقم بـ Flow: خطوة `@start()` تضبط الموضوع في **الحالة**، وخطوة `@listen` تشغّل الطاقم. يظل `output_file` للمهمة يكتب `output/report.md`. - @before_kickoff - def before_kickoff_function(self, inputs): - print(f"Before kickoff function with inputs: {inputs}") - return inputs # You can return the inputs or modify them as needed - - @after_kickoff - def after_kickoff_function(self, result): - print(f"After kickoff function with result: {result}") - return result # You can return the result or modify it as needed - - # ... remaining code - ``` - - - - على سبيل المثال، يمكنك تمرير مدخل `topic` لطاقمك لتخصيص البحث وإعداد التقارير. ```python main.py - #!/usr/bin/env python - # src/latest_ai_development/main.py - import sys - from latest_ai_development.crew import LatestAiDevelopmentCrew + # src/latest_ai_flow/main.py + from pydantic import BaseModel - def run(): - """ - Run the crew. 
- """ - inputs = { - 'topic': 'AI Agents' - } - LatestAiDevelopmentCrew().crew().kickoff(inputs=inputs) + from crewai.flow import Flow, listen, start + + from latest_ai_flow.crews.content_crew.content_crew import ResearchCrew + + + class ResearchFlowState(BaseModel): + topic: str = "" + report: str = "" + + + class LatestAiFlow(Flow[ResearchFlowState]): + @start() + def prepare_topic(self, crewai_trigger_payload: dict | None = None): + if crewai_trigger_payload: + self.state.topic = crewai_trigger_payload.get("topic", "AI Agents") + else: + self.state.topic = "AI Agents" + print(f"الموضوع: {self.state.topic}") + + @listen(prepare_topic) + def run_research(self): + result = ResearchCrew().crew().kickoff(inputs={"topic": self.state.topic}) + self.state.report = result.raw + print("اكتمل طاقم البحث.") + + @listen(run_research) + def summarize(self): + print("مسار التقرير: output/report.md") + + + def kickoff(): + LatestAiFlow().kickoff() + + + def plot(): + LatestAiFlow().plot() + + + if __name__ == "__main__": + kickoff() ``` - - - قبل تشغيل طاقمك، تأكد من تعيين المفاتيح التالية كمتغيرات بيئة في ملف `.env`: - - مفتاح API لـ [Serper.dev](https://serper.dev/): `SERPER_API_KEY=YOUR_KEY_HERE` - - إعداد النموذج الذي اخترته، مثل مفتاح API. راجع - [دليل إعداد LLM](/ar/concepts/llms#setting-up-your-llm) لمعرفة كيفية إعداد النماذج من أي مزود. - - - - اقفل التبعيات وثبّتها باستخدام أمر CLI: - - ```shell Terminal - crewai install - ``` - - - إذا كانت لديك حزم إضافية تريد تثبيتها، يمكنك القيام بذلك عبر: - - ```shell Terminal - uv add - ``` - - - - - لتشغيل طاقمك، نفّذ الأمر التالي في جذر مشروعك: - - ```bash Terminal - crewai run - ``` - + + إذا كان اسم الحزمة ليس `latest_ai_flow`، عدّل استيراد `ResearchCrew` ليطابق مسار الوحدة في مشروعك. + - - لمستخدمي CrewAI AMP، يمكنك إنشاء نفس الطاقم دون كتابة كود: + + في جذر المشروع، ضبط `.env`: -1. سجّل الدخول إلى حساب CrewAI AMP (أنشئ حساباً مجانياً على [app.crewai.com](https://app.crewai.com)) -2. افتح Crew Studio -3. 
اكتب ما هي الأتمتة التي تحاول بناءها -4. أنشئ مهامك بصرياً واربطها بالتسلسل -5. هيئ مدخلاتك وانقر "تحميل الكود" أو "نشر" - -![واجهة Crew Studio للبدء السريع](/images/enterprise/crew-studio-interface.png) - - - ابدأ حسابك المجاني في CrewAI AMP - + - `SERPER_API_KEY` — من [Serper.dev](https://serper.dev/) + - مفاتيح مزوّد النموذج حسب الحاجة — راجع [إعداد LLM](/ar/concepts/llms#setting-up-your-llm) - - يجب أن ترى المخرجات في وحدة التحكم ويجب إنشاء ملف `report.md` في جذر مشروعك مع التقرير النهائي. -إليك مثالاً على شكل التقرير: + + + ```shell Terminal + crewai install + crewai run + ``` + + + يُنفّذ `crewai run` نقطة دخول Flow المعرّفة في المشروع (نفس أمر الطواقم؛ نوع المشروع `"flow"` في `pyproject.toml`). + + + + يجب أن ترى سجلات من Flow والطاقم. افتح **`output/report.md`** للتقرير المُولَّد (مقتطف): ```markdown output/report.md - # Comprehensive Report on the Rise and Impact of AI Agents in 2025 + # وكلاء الذكاء الاصطناعي في 2026: المشهد والاتجاهات - ## 1. Introduction to AI Agents - In 2025, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce. + ## ملخص تنفيذي + … - ## 2. Benefits of AI Agents - AI agents bring numerous advantages that are transforming traditional work environments. Key benefits include: + ## أبرز الاتجاهات + - **استخدام الأدوات والتنسيق** — … + - **التبني المؤسسي** — … - - **Task Automation**: AI agents can carry out repetitive tasks such as data entry, scheduling, and payroll processing without human intervention, greatly reducing the time and resources spent on these activities. 
- - **Improved Efficiency**: By quickly processing large datasets and performing analyses that would take humans significantly longer, AI agents enhance operational efficiency. This allows teams to focus on strategic tasks that require higher-level thinking. - - **Enhanced Decision-Making**: AI agents can analyze trends and patterns in data, provide insights, and even suggest actions, helping stakeholders make informed decisions based on factual data rather than intuition alone. - - ## 3. Popular AI Agent Frameworks - Several frameworks have emerged to facilitate the development of AI agents, each with its own unique features and capabilities. Some of the most popular frameworks include: - - - **Autogen**: A framework designed to streamline the development of AI agents through automation of code generation. - - **Semantic Kernel**: Focuses on natural language processing and understanding, enabling agents to comprehend user intentions better. - - **Promptflow**: Provides tools for developers to create conversational agents that can navigate complex interactions seamlessly. - - **Langchain**: Specializes in leveraging various APIs to ensure agents can access and utilize external data effectively. - - **CrewAI**: Aimed at collaborative environments, CrewAI strengthens teamwork by facilitating communication through AI-driven insights. - - **MemGPT**: Combines memory-optimized architectures with generative capabilities, allowing for more personalized interactions with users. - - These frameworks empower developers to build versatile and intelligent agents that can engage users, perform advanced analytics, and execute various tasks aligned with organizational goals. - - ## 4. AI Agents in Human Resources - AI agents are revolutionizing HR practices by automating and optimizing key functions: - - - **Recruiting**: AI agents can screen resumes, schedule interviews, and even conduct initial assessments, thus accelerating the hiring process while minimizing biases. 
- - **Succession Planning**: AI systems analyze employee performance data and potential, helping organizations identify future leaders and plan appropriate training. - - **Employee Engagement**: Chatbots powered by AI can facilitate feedback loops between employees and management, promoting an open culture and addressing concerns promptly. - - As AI continues to evolve, HR departments leveraging these agents can realize substantial improvements in both efficiency and employee satisfaction. - - ## 5. AI Agents in Finance - The finance sector is seeing extensive integration of AI agents that enhance financial practices: - - - **Expense Tracking**: Automated systems manage and monitor expenses, flagging anomalies and offering recommendations based on spending patterns. - - **Risk Assessment**: AI models assess credit risk and uncover potential fraud by analyzing transaction data and behavioral patterns. - - **Investment Decisions**: AI agents provide stock predictions and analytics based on historical data and current market conditions, empowering investors with informative insights. - - The incorporation of AI agents into finance is fostering a more responsive and risk-aware financial landscape. - - ## 6. Market Trends and Investments - The growth of AI agents has attracted significant investment, especially amidst the rising popularity of chatbots and generative AI technologies. Companies and entrepreneurs are eager to explore the potential of these systems, recognizing their ability to streamline operations and improve customer engagement. - - Conversely, corporations like Microsoft are taking strides to integrate AI agents into their product offerings, with enhancements to their Copilot 365 applications. This strategic move emphasizes the importance of AI literacy in the modern workplace and indicates the stabilizing of AI agents as essential business tools. - - ## 7. 
Future Predictions and Implications - Experts predict that AI agents will transform essential aspects of work life. As we look toward the future, several anticipated changes include: - - - Enhanced integration of AI agents across all business functions, creating interconnected systems that leverage data from various departmental silos for comprehensive decision-making. - - Continued advancement of AI technologies, resulting in smarter, more adaptable agents capable of learning and evolving from user interactions. - - Increased regulatory scrutiny to ensure ethical use, especially concerning data privacy and employee surveillance as AI agents become more prevalent. - - To stay competitive and harness the full potential of AI agents, organizations must remain vigilant about latest developments in AI technology and consider continuous learning and adaptation in their strategic planning. - - ## 8. Conclusion - The emergence of AI agents is undeniably reshaping the workplace landscape in 5. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment. + ## الآثار + … ``` - + + سيكون الملف الفعلي أطول ويعكس نتائج بحث مباشرة. +## كيف يترابط هذا + +1. **Flow** — يشغّل `LatestAiFlow` أولًا `prepare_topic` ثم `run_research` ثم `summarize`. الحالة (`topic`، `report`) على Flow. +2. **الطاقم** — يشغّل `ResearchCrew` مهمة واحدة بوكيل واحد: الباحث يستخدم **Serper** للبحث على الويب ثم يكتب التقرير. +3. **المُخرَج** — يكتب `output_file` للمهمة التقرير في `output/report.md`. + +للتعمق في أنماط Flow (التوجيه، الاستمرارية، الإنسان في الحلقة)، راجع [ابنِ أول Flow](/ar/guides/flows/first-flow) و[Flows](/ar/concepts/flows). للطواقم دون Flow، راجع [Crews](/ar/concepts/crews). لوكيل `Agent` واحد و`kickoff()` بلا مهام، راجع [Agents](/ar/concepts/agents#direct-agent-interaction-with-kickoff). 
+ -تهانينا! - -لقد أعددت مشروع طاقمك بنجاح وأنت جاهز للبدء في بناء سير العمل الوكيلي الخاص بك! - +أصبح لديك Flow كامل مع طاقم وكيل وتقرير محفوظ — قاعدة قوية لإضافة خطوات أو طواقم أو أدوات. -### ملاحظة حول اتساق التسمية +### اتساق التسمية -يجب أن تتطابق الأسماء التي تستخدمها في ملفات YAML (`agents.yaml` و`tasks.yaml`) مع أسماء الدوال في كود Python الخاص بك. -على سبيل المثال، يمكنك الإشارة إلى الوكيل لمهام محددة من ملف `tasks.yaml`. -يتيح اتساق التسمية هذا لـ CrewAI ربط تكويناتك بكودك تلقائياً؛ وإلا فلن تتعرف مهمتك على المرجع بشكل صحيح. +يجب أن تطابق مفاتيح YAML (`researcher`، `research_task`) أسماء الدوال في صف `@CrewBase`. راجع [Crews](/ar/concepts/crews) لنمط الديكورات الكامل. -#### أمثلة على المراجع +## النشر - - لاحظ كيف نستخدم نفس الاسم للوكيل في ملف `agents.yaml` - (`email_summarizer`) واسم الدالة في ملف `crew.py` - (`email_summarizer`). - +ادفع Flow إلى **[CrewAI AMP](https://app.crewai.com)** بعد أن يعمل محليًا ويكون المشروع في مستودع **GitHub**. من جذر المشروع: -```yaml agents.yaml -email_summarizer: - role: > - Email Summarizer - goal: > - Summarize emails into a concise and clear summary - backstory: > - You will create a 5 bullet point summary of the report - llm: provider/model-id # Add your choice of model here + +```bash المصادقة +crewai login ``` - - لاحظ كيف نستخدم نفس الاسم للمهمة في ملف `tasks.yaml` - (`email_summarizer_task`) واسم الدالة في ملف `crew.py` - (`email_summarizer_task`). - - -```yaml tasks.yaml -email_summarizer_task: - description: > - Summarize the email into a 5 bullet point summary - expected_output: > - A 5 bullet point summary of the email - agent: email_summarizer - context: - - reporting_task - - research_task +```bash إنشاء نشر +crewai deploy create ``` -## نشر طاقمك +```bash الحالة والسجلات +crewai deploy status +crewai deploy logs +``` -أسهل طريقة لنشر طاقمك في الإنتاج هي من خلال [CrewAI AMP](http://app.crewai.com). 
+```bash إرسال التحديثات بعد تغيير الكود +crewai deploy push +``` -شاهد هذا الفيديو التعليمي لعرض خطوة بخطوة لنشر طاقمك على [CrewAI AMP](http://app.crewai.com) باستخدام CLI. +```bash عرض النشرات أو حذفها +crewai deploy list +crewai deploy remove +``` + - + + غالبًا ما يستغرق **النشر الأول حوالي دقيقة**. المتطلبات الكاملة ومسار الواجهة الويب في [النشر على AMP](/ar/enterprise/guides/deploy-to-amp). + - - ابدأ مع CrewAI AMP وانشر طاقمك في بيئة إنتاج - بنقرات قليلة فقط. + + النشر على AMP خطوة بخطوة (CLI ولوحة التحكم). - انضم إلى مجتمعنا مفتوح المصدر لمناقشة الأفكار ومشاركة مشاريعك والتواصل - مع مطورين آخرين لـ CrewAI. + ناقش الأفكار وشارك مشاريعك وتواصل مع مطوري CrewAI. diff --git a/docs/en/enterprise/guides/deploy-to-amp.mdx b/docs/en/enterprise/guides/deploy-to-amp.mdx index c0309c0b6..25f6896b8 100644 --- a/docs/en/enterprise/guides/deploy-to-amp.mdx +++ b/docs/en/enterprise/guides/deploy-to-amp.mdx @@ -106,7 +106,7 @@ The CLI automatically detects your project type from `pyproject.toml` and builds ``` - The first deployment typically takes 10-15 minutes as it builds the container images. Subsequent deployments are much faster. + The first deployment typically takes around 1 minute. @@ -188,7 +188,7 @@ You need to push your crew to a GitHub repository. If you haven't created a crew 1. Click the "Deploy" button to start the deployment process 2. You can monitor the progress through the progress bar - 3. The first deployment typically takes around 10-15 minutes; subsequent deployments will be faster + 3. 
The first deployment typically takes around 1 minute ![Deploy Progress](/images/enterprise/deploy-progress.png) diff --git a/docs/en/installation.mdx b/docs/en/installation.mdx index c6899d6e6..727f71220 100644 --- a/docs/en/installation.mdx +++ b/docs/en/installation.mdx @@ -207,9 +207,8 @@ For teams and organizations, CrewAI offers enterprise deployment options that el ## Next Steps - - Follow our quickstart guide to create your first CrewAI agent and get - hands-on experience. + + Follow the quickstart to scaffold a Flow, run a one-agent crew, and produce a report. - Follow our quickstart guide to create your first CrewAI agent and get hands-on experience. + Scaffold a Flow, run a crew with one agent, and generate a report end to end. -## Build your first CrewAI Agent +In this guide you will **create a Flow** that sets a research topic, runs a **crew with one agent** (a researcher using web search), and ends with a **markdown report** on disk. Flows are the recommended way to structure production apps: they own **state** and **execution order**, while **agents** do the work inside a crew step. -Let's create a simple crew that will help us `research` and `report` on the `latest AI developments` for a given topic or subject. +If you have not installed CrewAI yet, follow the [installation guide](/en/installation) first. -Before we proceed, make sure you have finished installing CrewAI. -If you haven't installed them yet, you can do so by following the [installation guide](/en/installation). +## Prerequisites -Follow the steps below to get Crewing! 🚣‍♂️ +- Python environment and the CrewAI CLI (see [installation](/en/installation)) +- An LLM configured with the right API keys — see [LLMs](/en/concepts/llms#setting-up-your-llm) +- A [Serper.dev](https://serper.dev/) API key (`SERPER_API_KEY`) for web search in this tutorial + +## Build your first Flow - - Create a new crew project by running the following command in your terminal. 
- This will create a new directory called `latest-ai-development` with the basic structure for your crew. + + From your terminal, scaffold a Flow project (the folder name uses underscores, e.g. `latest_ai_flow`): + ```shell Terminal - crewai create crew latest-ai-development + crewai create flow latest-ai-flow + cd latest_ai_flow ``` + + This creates a Flow app under `src/latest_ai_flow/`, including a starter crew under `crews/content_crew/` that you will replace with a minimal **single-agent** research crew in the next steps. - - - ```shell Terminal - cd latest_ai_development - ``` - - - - - You can also modify the agents as needed to fit your use case or copy and paste as is to your project. - Any variable interpolated in your `agents.yaml` and `tasks.yaml` files like `{topic}` will be replaced by the value of the variable in the `main.py` file. - + + + Replace the contents of `src/latest_ai_flow/crews/content_crew/config/agents.yaml` with a single researcher. Variables like `{topic}` are filled from `crew.kickoff(inputs=...)`. + ```yaml agents.yaml - # src/latest_ai_development/config/agents.yaml + # src/latest_ai_flow/crews/content_crew/config/agents.yaml researcher: role: > {topic} Senior Data Researcher @@ -53,336 +51,232 @@ Follow the steps below to get Crewing! 🚣‍♂️ Uncover cutting-edge developments in {topic} backstory: > You're a seasoned researcher with a knack for uncovering the latest - developments in {topic}. Known for your ability to find the most relevant - information and present it in a clear and concise manner. - - reporting_analyst: - role: > - {topic} Reporting Analyst - goal: > - Create detailed reports based on {topic} data analysis and research findings - backstory: > - You're a meticulous analyst with a keen eye for detail. You're known for - your ability to turn complex data into clear and concise reports, making - it easy for others to understand and act on the information you provide. + developments in {topic}. 
You find the most relevant information and + present it clearly. ``` - + + ```yaml tasks.yaml - # src/latest_ai_development/config/tasks.yaml + # src/latest_ai_flow/crews/content_crew/config/tasks.yaml research_task: description: > - Conduct a thorough research about {topic} - Make sure you find any interesting and relevant information given - the current year is 2025. + Conduct thorough research about {topic}. Use web search to find current, + credible information. The current year is 2026. expected_output: > - A list with 10 bullet points of the most relevant information about {topic} + A markdown report with clear sections: key trends, notable tools or companies, + and implications. Aim for 800–1200 words. No fenced code blocks around the whole document. agent: researcher - - reporting_task: - description: > - Review the context you got and expand each topic into a full section for a report. - Make sure the report is detailed and contains any and all relevant information. - expected_output: > - A fully fledge reports with the mains topics, each with a full section of information. - Formatted as markdown without '```' - agent: reporting_analyst - output_file: report.md + output_file: output/report.md ``` - - ```python crew.py - # src/latest_ai_development/crew.py - from crewai import Agent, Crew, Process, Task - from crewai.project import CrewBase, agent, crew, task - from crewai_tools import SerperDevTool - from crewai.agents.agent_builder.base_agent import BaseAgent + + + Point the generated crew at your YAML and attach `SerperDevTool` to the researcher. 
+ + ```python content_crew.py + # src/latest_ai_flow/crews/content_crew/content_crew.py from typing import List + from crewai import Agent, Crew, Process, Task + from crewai.agents.agent_builder.base_agent import BaseAgent + from crewai.project import CrewBase, agent, crew, task + from crewai_tools import SerperDevTool + + @CrewBase - class LatestAiDevelopmentCrew(): - """LatestAiDevelopment crew""" + class ResearchCrew: + """Single-agent research crew used inside the Flow.""" agents: List[BaseAgent] tasks: List[Task] + agents_config = "config/agents.yaml" + tasks_config = "config/tasks.yaml" + @agent def researcher(self) -> Agent: return Agent( - config=self.agents_config['researcher'], # type: ignore[index] + config=self.agents_config["researcher"], # type: ignore[index] verbose=True, - tools=[SerperDevTool()] - ) - - @agent - def reporting_analyst(self) -> Agent: - return Agent( - config=self.agents_config['reporting_analyst'], # type: ignore[index] - verbose=True + tools=[SerperDevTool()], ) @task def research_task(self) -> Task: return Task( - config=self.tasks_config['research_task'], # type: ignore[index] - ) - - @task - def reporting_task(self) -> Task: - return Task( - config=self.tasks_config['reporting_task'], # type: ignore[index] - output_file='output/report.md' # This is the file that will be contain the final report. 
+ config=self.tasks_config["research_task"], # type: ignore[index] ) @crew def crew(self) -> Crew: - """Creates the LatestAiDevelopment crew""" return Crew( - agents=self.agents, # Automatically created by the @agent decorator - tasks=self.tasks, # Automatically created by the @task decorator + agents=self.agents, + tasks=self.tasks, process=Process.sequential, verbose=True, ) ``` - - ```python crew.py - # src/latest_ai_development/crew.py - from crewai import Agent, Crew, Process, Task - from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff - from crewai_tools import SerperDevTool - @CrewBase - class LatestAiDevelopmentCrew(): - """LatestAiDevelopment crew""" + + Connect the crew to a Flow: a `@start()` step sets the topic in **state**, and a `@listen` step runs the crew. The task’s `output_file` still writes `output/report.md`. - @before_kickoff - def before_kickoff_function(self, inputs): - print(f"Before kickoff function with inputs: {inputs}") - return inputs # You can return the inputs or modify them as needed - - @after_kickoff - def after_kickoff_function(self, result): - print(f"After kickoff function with result: {result}") - return result # You can return the result or modify it as needed - - # ... remaining code - ``` - - - - For example, you can pass the `topic` input to your crew to customize the research and reporting. ```python main.py - #!/usr/bin/env python - # src/latest_ai_development/main.py - import sys - from latest_ai_development.crew import LatestAiDevelopmentCrew + # src/latest_ai_flow/main.py + from pydantic import BaseModel - def run(): - """ - Run the crew. 
- """ - inputs = { - 'topic': 'AI Agents' - } - LatestAiDevelopmentCrew().crew().kickoff(inputs=inputs) + from crewai.flow import Flow, listen, start + + from latest_ai_flow.crews.content_crew.content_crew import ResearchCrew + + + class ResearchFlowState(BaseModel): + topic: str = "" + report: str = "" + + + class LatestAiFlow(Flow[ResearchFlowState]): + @start() + def prepare_topic(self, crewai_trigger_payload: dict | None = None): + if crewai_trigger_payload: + self.state.topic = crewai_trigger_payload.get("topic", "AI Agents") + else: + self.state.topic = "AI Agents" + print(f"Topic: {self.state.topic}") + + @listen(prepare_topic) + def run_research(self): + result = ResearchCrew().crew().kickoff(inputs={"topic": self.state.topic}) + self.state.report = result.raw + print("Research crew finished.") + + @listen(run_research) + def summarize(self): + print("Report path: output/report.md") + + + def kickoff(): + LatestAiFlow().kickoff() + + + def plot(): + LatestAiFlow().plot() + + + if __name__ == "__main__": + kickoff() ``` - - - Before running your crew, make sure you have the following keys set as environment variables in your `.env` file: - - A [Serper.dev](https://serper.dev/) API key: `SERPER_API_KEY=YOUR_KEY_HERE` - - The configuration for your choice of model, such as an API key. See the - [LLM setup guide](/en/concepts/llms#setting-up-your-llm) to learn how to configure models from any provider. - - - - Lock the dependencies and install them by using the CLI command: - - ```shell Terminal - crewai install - ``` - - - If you have additional packages that you want to install, you can do so by running: - - ```shell Terminal - uv add - ``` - - - - - To run your crew, execute the following command in the root of your project: - - ```bash Terminal - crewai run - ``` - + + If your package name differs from `latest_ai_flow`, change the import of `ResearchCrew` to match your project’s module path. 
+ - - For CrewAI AMP users, you can create the same crew without writing code: + + In `.env` at the project root, set: -1. Log in to your CrewAI AMP account (create a free account at [app.crewai.com](https://app.crewai.com)) -2. Open Crew Studio -3. Type what is the automation you're trying to build -4. Create your tasks visually and connect them in sequence -5. Configure your inputs and click "Download Code" or "Deploy" - -![Crew Studio Quickstart](/images/enterprise/crew-studio-interface.png) - - - Start your free account at CrewAI AMP - + - `SERPER_API_KEY` — from [Serper.dev](https://serper.dev/) + - Your model provider keys as required — see [LLM setup](/en/concepts/llms#setting-up-your-llm) - - You should see the output in the console and the `report.md` file should be created in the root of your project with the final report. -Here's an example of what the report should look like: + + + ```shell Terminal + crewai install + crewai run + ``` + + + `crewai run` executes the Flow entrypoint defined in your project (same command as for crews; project type is `"flow"` in `pyproject.toml`). + + + + + + + You should see logs from the Flow and the crew. Open **`output/report.md`** for the generated report (excerpt): ```markdown output/report.md - # Comprehensive Report on the Rise and Impact of AI Agents in 2025 + # AI Agents in 2026: Landscape and Trends - ## 1. Introduction to AI Agents - In 2025, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce. + ## Executive summary + … - ## 2. 
Benefits of AI Agents - AI agents bring numerous advantages that are transforming traditional work environments. Key benefits include: + ## Key trends + - **Tool use and orchestration** — … + - **Enterprise adoption** — … - - **Task Automation**: AI agents can carry out repetitive tasks such as data entry, scheduling, and payroll processing without human intervention, greatly reducing the time and resources spent on these activities. - - **Improved Efficiency**: By quickly processing large datasets and performing analyses that would take humans significantly longer, AI agents enhance operational efficiency. This allows teams to focus on strategic tasks that require higher-level thinking. - - **Enhanced Decision-Making**: AI agents can analyze trends and patterns in data, provide insights, and even suggest actions, helping stakeholders make informed decisions based on factual data rather than intuition alone. - - ## 3. Popular AI Agent Frameworks - Several frameworks have emerged to facilitate the development of AI agents, each with its own unique features and capabilities. Some of the most popular frameworks include: - - - **Autogen**: A framework designed to streamline the development of AI agents through automation of code generation. - - **Semantic Kernel**: Focuses on natural language processing and understanding, enabling agents to comprehend user intentions better. - - **Promptflow**: Provides tools for developers to create conversational agents that can navigate complex interactions seamlessly. - - **Langchain**: Specializes in leveraging various APIs to ensure agents can access and utilize external data effectively. - - **CrewAI**: Aimed at collaborative environments, CrewAI strengthens teamwork by facilitating communication through AI-driven insights. - - **MemGPT**: Combines memory-optimized architectures with generative capabilities, allowing for more personalized interactions with users. 
- - These frameworks empower developers to build versatile and intelligent agents that can engage users, perform advanced analytics, and execute various tasks aligned with organizational goals. - - ## 4. AI Agents in Human Resources - AI agents are revolutionizing HR practices by automating and optimizing key functions: - - - **Recruiting**: AI agents can screen resumes, schedule interviews, and even conduct initial assessments, thus accelerating the hiring process while minimizing biases. - - **Succession Planning**: AI systems analyze employee performance data and potential, helping organizations identify future leaders and plan appropriate training. - - **Employee Engagement**: Chatbots powered by AI can facilitate feedback loops between employees and management, promoting an open culture and addressing concerns promptly. - - As AI continues to evolve, HR departments leveraging these agents can realize substantial improvements in both efficiency and employee satisfaction. - - ## 5. AI Agents in Finance - The finance sector is seeing extensive integration of AI agents that enhance financial practices: - - - **Expense Tracking**: Automated systems manage and monitor expenses, flagging anomalies and offering recommendations based on spending patterns. - - **Risk Assessment**: AI models assess credit risk and uncover potential fraud by analyzing transaction data and behavioral patterns. - - **Investment Decisions**: AI agents provide stock predictions and analytics based on historical data and current market conditions, empowering investors with informative insights. - - The incorporation of AI agents into finance is fostering a more responsive and risk-aware financial landscape. - - ## 6. Market Trends and Investments - The growth of AI agents has attracted significant investment, especially amidst the rising popularity of chatbots and generative AI technologies. 
Companies and entrepreneurs are eager to explore the potential of these systems, recognizing their ability to streamline operations and improve customer engagement. - - Conversely, corporations like Microsoft are taking strides to integrate AI agents into their product offerings, with enhancements to their Copilot 365 applications. This strategic move emphasizes the importance of AI literacy in the modern workplace and indicates the stabilizing of AI agents as essential business tools. - - ## 7. Future Predictions and Implications - Experts predict that AI agents will transform essential aspects of work life. As we look toward the future, several anticipated changes include: - - - Enhanced integration of AI agents across all business functions, creating interconnected systems that leverage data from various departmental silos for comprehensive decision-making. - - Continued advancement of AI technologies, resulting in smarter, more adaptable agents capable of learning and evolving from user interactions. - - Increased regulatory scrutiny to ensure ethical use, especially concerning data privacy and employee surveillance as AI agents become more prevalent. - - To stay competitive and harness the full potential of AI agents, organizations must remain vigilant about latest developments in AI technology and consider continuous learning and adaptation in their strategic planning. - - ## 8. Conclusion - The emergence of AI agents is undeniably reshaping the workplace landscape in 5. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment. + ## Implications + … ``` - + + Your actual file will be longer and reflect live search results. +## How this run fits together + +1. **Flow** — `LatestAiFlow` runs `prepare_topic` first, then `run_research`, then `summarize`. 
State (`topic`, `report`) lives on the Flow. +2. **Crew** — `ResearchCrew` runs one task with one agent: the researcher uses **Serper** to search the web, then writes the structured report. +3. **Artifact** — The task’s `output_file` writes the report under `output/report.md`. + +To go deeper on Flow patterns (routing, persistence, human-in-the-loop), see [Build your first Flow](/en/guides/flows/first-flow) and [Flows](/en/concepts/flows). For crews without a Flow, see [Crews](/en/concepts/crews). For a single `Agent` and `kickoff()` without tasks, see [Agents](/en/concepts/agents#direct-agent-interaction-with-kickoff). + -Congratulations! - -You have successfully set up your crew project and are ready to start building your own agentic workflows! - +You now have an end-to-end Flow with an agent crew and a saved report — a solid base to add more steps, crews, or tools. -### Note on Consistency in Naming +### Naming consistency -The names you use in your YAML files (`agents.yaml` and `tasks.yaml`) should match the method names in your Python code. -For example, you can reference the agent for specific tasks from `tasks.yaml` file. -This naming consistency allows CrewAI to automatically link your configurations with your code; otherwise, your task won't recognize the reference properly. +YAML keys (`researcher`, `research_task`) must match the method names on your `@CrewBase` class. See [Crews](/en/concepts/crews) for the full decorator pattern. -#### Example References +## Deploying - - Note how we use the same name for the agent in the `agents.yaml` - (`email_summarizer`) file as the method name in the `crew.py` - (`email_summarizer`) file. - +Push your Flow to **[CrewAI AMP](https://app.crewai.com)** once it runs locally and your project is in a **GitHub** repository. 
From the project root: -```yaml agents.yaml -email_summarizer: - role: > - Email Summarizer - goal: > - Summarize emails into a concise and clear summary - backstory: > - You will create a 5 bullet point summary of the report - llm: provider/model-id # Add your choice of model here + +```bash Authenticate +crewai login ``` - - Note how we use the same name for the task in the `tasks.yaml` - (`email_summarizer_task`) file as the method name in the `crew.py` - (`email_summarizer_task`) file. - - -```yaml tasks.yaml -email_summarizer_task: - description: > - Summarize the email into a 5 bullet point summary - expected_output: > - A 5 bullet point summary of the email - agent: email_summarizer - context: - - reporting_task - - research_task +```bash Create deployment +crewai deploy create ``` -## Deploying Your Crew +```bash Check status & logs +crewai deploy status +crewai deploy logs +``` -The easiest way to deploy your crew to production is through [CrewAI AMP](http://app.crewai.com). +```bash Ship updates after you change code +crewai deploy push +``` -Watch this video tutorial for a step-by-step demonstration of deploying your crew to [CrewAI AMP](http://app.crewai.com) using the CLI. +```bash List or remove deployments +crewai deploy list +crewai deploy remove +``` + - + + The first deploy usually takes **around 1 minute**. Full prerequisites and the web UI flow are in [Deploy to AMP](/en/enterprise/guides/deploy-to-amp). + - - Get started with CrewAI AMP and deploy your crew in a production environment - with just a few clicks. + + Step-by-step AMP deployment (CLI and dashboard). - Join our open source community to discuss ideas, share your projects, and - connect with other CrewAI developers. + Discuss ideas, share projects, and connect with other CrewAI developers. 
diff --git a/docs/ko/enterprise/guides/deploy-to-amp.mdx b/docs/ko/enterprise/guides/deploy-to-amp.mdx index 66954c840..2a519b9d3 100644 --- a/docs/ko/enterprise/guides/deploy-to-amp.mdx +++ b/docs/ko/enterprise/guides/deploy-to-amp.mdx @@ -105,7 +105,7 @@ CLI는 `pyproject.toml`에서 프로젝트 유형을 자동으로 감지하고 ``` - 첫 배포는 컨테이너 이미지를 빌드하므로 일반적으로 10~15분 정도 소요됩니다. 이후 배포는 훨씬 빠릅니다. + 첫 배포는 보통 약 1분 정도 소요됩니다. @@ -187,7 +187,7 @@ Crew를 GitHub 저장소에 푸시해야 합니다. 아직 Crew를 만들지 않 1. "Deploy" 버튼을 클릭하여 배포 프로세스를 시작합니다. 2. 진행 바를 통해 진행 상황을 모니터링할 수 있습니다. - 3. 첫 번째 배포에는 일반적으로 약 10-15분 정도 소요되며, 이후 배포는 더 빠릅니다. + 3. 첫 번째 배포에는 일반적으로 약 1분 정도 소요됩니다 ![Deploy Progress](/images/enterprise/deploy-progress.png) diff --git a/docs/ko/installation.mdx b/docs/ko/installation.mdx index e73cfdf8c..fc47d796b 100644 --- a/docs/ko/installation.mdx +++ b/docs/ko/installation.mdx @@ -197,9 +197,8 @@ CrewAI는 의존성 관리와 패키지 처리를 위해 `uv`를 사용합니다 ## 다음 단계 - - 빠른 시작 가이드를 따라 CrewAI 에이전트를 처음 만들어보고 직접 경험해 - 보세요. + + Flow를 만들고 에이전트 한 명짜리 crew를 실행해 보고서까지 만드는 방법을 따라 해 보세요. - 빠른 시작 가이드를 따라 첫 번째 CrewAI agent를 만들고 직접 경험해 보세요. + Flow를 만들고 에이전트 한 명 crew를 실행해 끝까지 보고서를 생성해 보세요. -## 첫 번째 CrewAI Agent 만들기 +이 가이드에서는 **Flow**를 만들어 연구 주제를 정하고, **에이전트 한 명으로 구성된 crew**(웹 검색을 쓰는 연구원)를 실행한 뒤, 디스크에 **Markdown 보고서**를 남깁니다. Flow는 프로덕션 앱을 구성하는 권장 방식으로, **상태**와 **실행 순서**를 담당하고 **에이전트**는 crew 단계 안에서 실제 작업을 수행합니다. -이제 주어진 주제나 항목에 대해 `최신 AI 개발 동향`을 `연구`하고 `보고`하는 간단한 crew를 만들어보겠습니다. +CrewAI를 아직 설치하지 않았다면 먼저 [설치 가이드](/ko/installation)를 따르세요. -진행하기 전에 CrewAI 설치를 완료했는지 확인하세요. -아직 설치하지 않았다면, [설치 가이드](/ko/installation)를 참고해 설치할 수 있습니다. +## 사전 요건 -아래 단계를 따라 Crewing을 시작하세요! 🚣‍♂️ +- Python 환경과 CrewAI CLI([설치](/ko/installation) 참고) +- 올바른 API 키로 설정한 LLM — [LLM](/ko/concepts/llms#setting-up-your-llm) 참고 +- 이 튜토리얼의 웹 검색용 [Serper.dev](https://serper.dev/) API 키(`SERPER_API_KEY`) + +## 첫 번째 Flow 만들기 - - 터미널에서 아래 명령어를 실행하여 새로운 crew 프로젝트를 만드세요. - 이 작업은 `latest-ai-development`라는 새 디렉터리와 기본 구조를 생성합니다. + + 터미널에서 Flow 프로젝트를 생성합니다(폴더 이름은 밑줄 형식입니다. 예: `latest_ai_flow`). 
+ ```shell Terminal - crewai create crew latest-ai-development + crewai create flow latest-ai-flow + cd latest_ai_flow ``` + + 이렇게 하면 `src/latest_ai_flow/` 아래에 Flow 앱이 만들어지고, 다음 단계에서 **단일 에이전트** 연구 crew로 바꿀 시작용 crew가 `crews/content_crew/`에 포함됩니다. - - - ```shell Terminal - cd latest_ai_development - ``` - - - - - 프로젝트에 맞게 agent를 수정하거나 복사/붙여넣기를 할 수 있습니다. - `agents.yaml` 및 `tasks.yaml` 파일에서 `{topic}`과 같은 변수를 사용하면, 이는 `main.py` 파일의 변수 값으로 대체됩니다. - + + + `src/latest_ai_flow/crews/content_crew/config/agents.yaml` 내용을 한 명의 연구원만 남기도록 바꿉니다. `{topic}` 같은 변수는 `crew.kickoff(inputs=...)`로 채워집니다. + ```yaml agents.yaml - # src/latest_ai_development/config/agents.yaml + # src/latest_ai_flow/crews/content_crew/config/agents.yaml researcher: role: > - {topic} Senior Data Researcher + {topic} 시니어 데이터 리서처 goal: > - Uncover cutting-edge developments in {topic} + {topic} 분야의 최신 동향을 파악한다 backstory: > - You're a seasoned researcher with a knack for uncovering the latest - developments in {topic}. Known for your ability to find the most relevant - information and present it in a clear and concise manner. - - reporting_analyst: - role: > - {topic} Reporting Analyst - goal: > - Create detailed reports based on {topic} data analysis and research findings - backstory: > - You're a meticulous analyst with a keen eye for detail. You're known for - your ability to turn complex data into clear and concise reports, making - it easy for others to understand and act on the information you provide. + 당신은 {topic}의 최신 흐름을 찾아내는 데 능숙한 연구원입니다. + 가장 관련성 높은 정보를 찾아 명확하게 전달합니다. ``` - + + ```yaml tasks.yaml - # src/latest_ai_development/config/tasks.yaml + # src/latest_ai_flow/crews/content_crew/config/tasks.yaml research_task: description: > - Conduct a thorough research about {topic} - Make sure you find any interesting and relevant information given - the current year is 2025. + {topic}에 대해 철저히 조사하세요. 웹 검색으로 최신이고 신뢰할 수 있는 정보를 찾으세요. + 현재 연도는 2026년입니다. 
expected_output: > - A list with 10 bullet points of the most relevant information about {topic} + 마크다운 보고서로, 주요 트렌드·주목할 도구나 기업·시사점 등으로 섹션을 나누세요. + 분량은 약 800~1200단어. 문서 전체를 코드 펜스로 감싸지 마세요. agent: researcher - - reporting_task: - description: > - Review the context you got and expand each topic into a full section for a report. - Make sure the report is detailed and contains any and all relevant information. - expected_output: > - A fully fledge reports with the mains topics, each with a full section of information. - Formatted as markdown without '```' - agent: reporting_analyst - output_file: report.md + output_file: output/report.md ``` - - ```python crew.py - # src/latest_ai_development/crew.py - from crewai import Agent, Crew, Process, Task - from crewai.project import CrewBase, agent, crew, task - from crewai_tools import SerperDevTool - from crewai.agents.agent_builder.base_agent import BaseAgent + + + 생성된 crew가 YAML을 읽고 연구원에게 `SerperDevTool`을 붙이도록 합니다. + + ```python content_crew.py + # src/latest_ai_flow/crews/content_crew/content_crew.py from typing import List + from crewai import Agent, Crew, Process, Task + from crewai.agents.agent_builder.base_agent import BaseAgent + from crewai.project import CrewBase, agent, crew, task + from crewai_tools import SerperDevTool + + @CrewBase - class LatestAiDevelopmentCrew(): - """LatestAiDevelopment crew""" + class ResearchCrew: + """Flow 안에서 사용하는 단일 에이전트 연구 crew.""" agents: List[BaseAgent] tasks: List[Task] + agents_config = "config/agents.yaml" + tasks_config = "config/tasks.yaml" + @agent def researcher(self) -> Agent: return Agent( - config=self.agents_config['researcher'], # type: ignore[index] + config=self.agents_config["researcher"], # type: ignore[index] verbose=True, - tools=[SerperDevTool()] - ) - - @agent - def reporting_analyst(self) -> Agent: - return Agent( - config=self.agents_config['reporting_analyst'], # type: ignore[index] - verbose=True + tools=[SerperDevTool()], ) @task def research_task(self) -> 
Task: return Task( - config=self.tasks_config['research_task'], # type: ignore[index] - ) - - @task - def reporting_task(self) -> Task: - return Task( - config=self.tasks_config['reporting_task'], # type: ignore[index] - output_file='output/report.md' # This is the file that will be contain the final report. + config=self.tasks_config["research_task"], # type: ignore[index] ) @crew def crew(self) -> Crew: - """Creates the LatestAiDevelopment crew""" return Crew( - agents=self.agents, # Automatically created by the @agent decorator - tasks=self.tasks, # Automatically created by the @task decorator + agents=self.agents, + tasks=self.tasks, process=Process.sequential, verbose=True, ) ``` - - ```python crew.py - # src/latest_ai_development/crew.py - from crewai import Agent, Crew, Process, Task - from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff - from crewai_tools import SerperDevTool - @CrewBase - class LatestAiDevelopmentCrew(): - """LatestAiDevelopment crew""" + + crew를 Flow에 연결합니다: `@start()` 단계에서 주제를 **상태**에 넣고, `@listen` 단계에서 crew를 실행합니다. 작업의 `output_file`은 그대로 `output/report.md`에 씁니다. - @before_kickoff - def before_kickoff_function(self, inputs): - print(f"Before kickoff function with inputs: {inputs}") - return inputs # You can return the inputs or modify them as needed - - @after_kickoff - def after_kickoff_function(self, result): - print(f"After kickoff function with result: {result}") - return result # You can return the result or modify it as needed - - # ... remaining code - ``` - - - - 예를 들어, crew에 `topic` 입력값을 넘겨 연구 및 보고서 출력을 맞춤화할 수 있습니다. ```python main.py - #!/usr/bin/env python - # src/latest_ai_development/main.py - import sys - from latest_ai_development.crew import LatestAiDevelopmentCrew + # src/latest_ai_flow/main.py + from pydantic import BaseModel - def run(): - """ - Run the crew. 
- """ - inputs = { - 'topic': 'AI Agents' - } - LatestAiDevelopmentCrew().crew().kickoff(inputs=inputs) + from crewai.flow import Flow, listen, start + + from latest_ai_flow.crews.content_crew.content_crew import ResearchCrew + + + class ResearchFlowState(BaseModel): + topic: str = "" + report: str = "" + + + class LatestAiFlow(Flow[ResearchFlowState]): + @start() + def prepare_topic(self, crewai_trigger_payload: dict | None = None): + if crewai_trigger_payload: + self.state.topic = crewai_trigger_payload.get("topic", "AI Agents") + else: + self.state.topic = "AI Agents" + print(f"주제: {self.state.topic}") + + @listen(prepare_topic) + def run_research(self): + result = ResearchCrew().crew().kickoff(inputs={"topic": self.state.topic}) + self.state.report = result.raw + print("연구 crew 실행 완료.") + + @listen(run_research) + def summarize(self): + print("보고서 경로: output/report.md") + + + def kickoff(): + LatestAiFlow().kickoff() + + + def plot(): + LatestAiFlow().plot() + + + if __name__ == "__main__": + kickoff() ``` - - - crew를 실행하기 전에 `.env` 파일에 아래 키가 환경 변수로 설정되어 있는지 확인하세요: - - [Serper.dev](https://serper.dev/) API 키: `SERPER_API_KEY=YOUR_KEY_HERE` - - 사용하려는 모델의 설정, 예: API 키. 다양한 공급자의 모델 설정은 - [LLM 설정 가이드](/ko/concepts/llms#setting-up-your-llm)를 참고하세요. - - - - CLI 명령어로 의존성을 잠그고 설치하세요: - - ```shell Terminal - crewai install - ``` - - - 추가 설치가 필요한 패키지가 있다면, 아래와 같이 실행하면 됩니다: - - ```shell Terminal - uv add - ``` - - - - - 프로젝트 루트에서 다음 명령어로 crew를 실행하세요: - - ```bash Terminal - crewai run - ``` - + + 패키지 이름이 `latest_ai_flow`가 아니면 `ResearchCrew` import 경로를 프로젝트 모듈 경로에 맞게 바꾸세요. + - - CrewAI AMP 사용자는 코드를 작성하지 않고도 동일한 crew를 생성할 수 있습니다: + + 프로젝트 루트의 `.env`에 다음을 설정합니다. -1. CrewAI AMP 계정에 로그인하세요([app.crewai.com](https://app.crewai.com)에서 무료 계정 만들기) -2. Crew Studio 열기 -3. 구현하려는 자동화 내용을 입력하세요 -4. 미션을 시각적으로 생성하고 순차적으로 연결하세요 -5. 
입력값을 구성하고 "Download Code" 또는 "Deploy"를 클릭하세요 - -![Crew Studio Quickstart](/images/enterprise/crew-studio-interface.png) - - - CrewAI AOP에서 무료 계정을 시작하세요 - + - `SERPER_API_KEY` — [Serper.dev](https://serper.dev/)에서 발급 + - 모델 제공자 키 — [LLM 설정](/ko/concepts/llms#setting-up-your-llm) 참고 - - 콘솔에서 출력 결과를 확인할 수 있으며 프로젝트 루트에 `report.md` 파일로 최종 보고서가 생성됩니다. -보고서 예시는 다음과 같습니다: + + + ```shell Terminal + crewai install + crewai run + ``` + + + `crewai run`은 프로젝트에 정의된 Flow 진입점을 실행합니다(crew와 동일한 명령이며, `pyproject.toml`의 프로젝트 유형은 `"flow"`입니다). + + + + Flow와 crew 로그가 출력되어야 합니다. 생성된 보고서는 **`output/report.md`**에서 확인하세요(발췌): ```markdown output/report.md - # Comprehensive Report on the Rise and Impact of AI Agents in 2025 + # 2026년 AI 에이전트: 동향과 전망 - ## 1. Introduction to AI Agents - In 2025, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce. + ## 요약 + … - ## 2. Benefits of AI Agents - AI agents bring numerous advantages that are transforming traditional work environments. Key benefits include: + ## 주요 트렌드 + - **도구 사용과 오케스트레이션** — … + - **엔터프라이즈 도입** — … - - **Task Automation**: AI agents can carry out repetitive tasks such as data entry, scheduling, and payroll processing without human intervention, greatly reducing the time and resources spent on these activities. - - **Improved Efficiency**: By quickly processing large datasets and performing analyses that would take humans significantly longer, AI agents enhance operational efficiency. This allows teams to focus on strategic tasks that require higher-level thinking. 
- - **Enhanced Decision-Making**: AI agents can analyze trends and patterns in data, provide insights, and even suggest actions, helping stakeholders make informed decisions based on factual data rather than intuition alone. - - ## 3. Popular AI Agent Frameworks - Several frameworks have emerged to facilitate the development of AI agents, each with its own unique features and capabilities. Some of the most popular frameworks include: - - - **Autogen**: A framework designed to streamline the development of AI agents through automation of code generation. - - **Semantic Kernel**: Focuses on natural language processing and understanding, enabling agents to comprehend user intentions better. - - **Promptflow**: Provides tools for developers to create conversational agents that can navigate complex interactions seamlessly. - - **Langchain**: Specializes in leveraging various APIs to ensure agents can access and utilize external data effectively. - - **CrewAI**: Aimed at collaborative environments, CrewAI strengthens teamwork by facilitating communication through AI-driven insights. - - **MemGPT**: Combines memory-optimized architectures with generative capabilities, allowing for more personalized interactions with users. - - These frameworks empower developers to build versatile and intelligent agents that can engage users, perform advanced analytics, and execute various tasks aligned with organizational goals. - - ## 4. AI Agents in Human Resources - AI agents are revolutionizing HR practices by automating and optimizing key functions: - - - **Recruiting**: AI agents can screen resumes, schedule interviews, and even conduct initial assessments, thus accelerating the hiring process while minimizing biases. - - **Succession Planning**: AI systems analyze employee performance data and potential, helping organizations identify future leaders and plan appropriate training. 
- - **Employee Engagement**: Chatbots powered by AI can facilitate feedback loops between employees and management, promoting an open culture and addressing concerns promptly. - - As AI continues to evolve, HR departments leveraging these agents can realize substantial improvements in both efficiency and employee satisfaction. - - ## 5. AI Agents in Finance - The finance sector is seeing extensive integration of AI agents that enhance financial practices: - - - **Expense Tracking**: Automated systems manage and monitor expenses, flagging anomalies and offering recommendations based on spending patterns. - - **Risk Assessment**: AI models assess credit risk and uncover potential fraud by analyzing transaction data and behavioral patterns. - - **Investment Decisions**: AI agents provide stock predictions and analytics based on historical data and current market conditions, empowering investors with informative insights. - - The incorporation of AI agents into finance is fostering a more responsive and risk-aware financial landscape. - - ## 6. Market Trends and Investments - The growth of AI agents has attracted significant investment, especially amidst the rising popularity of chatbots and generative AI technologies. Companies and entrepreneurs are eager to explore the potential of these systems, recognizing their ability to streamline operations and improve customer engagement. - - Conversely, corporations like Microsoft are taking strides to integrate AI agents into their product offerings, with enhancements to their Copilot 365 applications. This strategic move emphasizes the importance of AI literacy in the modern workplace and indicates the stabilizing of AI agents as essential business tools. - - ## 7. Future Predictions and Implications - Experts predict that AI agents will transform essential aspects of work life. 
As we look toward the future, several anticipated changes include: - - - Enhanced integration of AI agents across all business functions, creating interconnected systems that leverage data from various departmental silos for comprehensive decision-making. - - Continued advancement of AI technologies, resulting in smarter, more adaptable agents capable of learning and evolving from user interactions. - - Increased regulatory scrutiny to ensure ethical use, especially concerning data privacy and employee surveillance as AI agents become more prevalent. - - To stay competitive and harness the full potential of AI agents, organizations must remain vigilant about latest developments in AI technology and consider continuous learning and adaptation in their strategic planning. - - ## 8. Conclusion - The emergence of AI agents is undeniably reshaping the workplace landscape in 5. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment. + ## 시사점 + … ``` - + + 실제 파일은 더 길고 실시간 검색 결과를 반영합니다. +## 한 번에 이해하기 + +1. **Flow** — `LatestAiFlow`는 `prepare_topic` → `run_research` → `summarize` 순으로 실행됩니다. 상태(`topic`, `report`)는 Flow에 있습니다. +2. **Crew** — `ResearchCrew`는 에이전트 한 명·작업 하나로 실행됩니다. 연구원이 **Serper**로 웹을 검색하고 구조화된 보고서를 씁니다. +3. **결과물** — 작업의 `output_file`이 `output/report.md`에 보고서를 씁니다. + +Flow 패턴(라우팅, 지속성, human-in-the-loop)을 더 보려면 [첫 Flow 만들기](/ko/guides/flows/first-flow)와 [Flows](/ko/concepts/flows)를 참고하세요. Flow 없이 crew만 쓰려면 [Crews](/ko/concepts/crews)를, 작업 없이 단일 `Agent`의 `kickoff()`만 쓰려면 [Agents](/ko/concepts/agents#direct-agent-interaction-with-kickoff)를 참고하세요. + -축하합니다! - -crew 프로젝트 설정이 완료되었으며, 이제 자신만의 agentic workflow 구축을 바로 시작하실 수 있습니다! - +에이전트 crew와 저장된 보고서까지 이어진 Flow를 완성했습니다. 이제 단계·crew·도구를 더해 확장할 수 있습니다. 
-### 명명 일관성에 대한 참고 +### 이름 일치 -YAML 파일(`agents.yaml` 및 `tasks.yaml`)에서 사용하는 이름은 Python 코드의 메서드 이름과 일치해야 합니다. -예를 들어, 특정 task에 대한 agent를 `tasks.yaml` 파일에서 참조할 수 있습니다. -이러한 명명 일관성을 지키면 CrewAI가 설정과 코드를 자동으로 연결할 수 있습니다. 그렇지 않으면 task가 참조를 제대로 인식하지 못할 수 있습니다. +YAML 키(`researcher`, `research_task`)는 `@CrewBase` 클래스의 메서드 이름과 같아야 합니다. 전체 데코레이터 패턴은 [Crews](/ko/concepts/crews)를 참고하세요. -#### 예시 참조 +## 배포 - - `agents.yaml` (`email_summarizer`) 파일에서 에이전트 이름과 `crew.py` - (`email_summarizer`) 파일에서 메서드 이름이 동일하게 사용되는 점에 주목하세요. - +로컬에서 정상 실행되고 프로젝트가 **GitHub** 저장소에 있으면 Flow를 **[CrewAI AMP](https://app.crewai.com)**에 올릴 수 있습니다. 프로젝트 루트에서: -```yaml agents.yaml -email_summarizer: - role: > - Email Summarizer - goal: > - Summarize emails into a concise and clear summary - backstory: > - You will create a 5 bullet point summary of the report - llm: provider/model-id # Add your choice of model here + +```bash 인증 +crewai login ``` - - `tasks.yaml` (`email_summarizer_task`) 파일에서 태스크 이름과 `crew.py` - (`email_summarizer_task`) 파일에서 메서드 이름이 동일하게 사용되는 점에 - 주목하세요. - - -```yaml tasks.yaml -email_summarizer_task: - description: > - Summarize the email into a 5 bullet point summary - expected_output: > - A 5 bullet point summary of the email - agent: email_summarizer - context: - - reporting_task - - research_task +```bash 배포 생성 +crewai deploy create ``` -## Crew 배포하기 +```bash 상태 및 로그 +crewai deploy status +crewai deploy logs +``` -production 환경에 crew를 배포하는 가장 쉬운 방법은 [CrewAI AMP](http://app.crewai.com)를 통해서입니다. +```bash 코드 변경 후 반영 +crewai deploy push +``` -CLI를 사용하여 [CrewAI AMP](http://app.crewai.com)에 crew를 배포하는 단계별 시연은 이 영상 튜토리얼을 참고하세요. +```bash 배포 목록 또는 삭제 +crewai deploy list +crewai deploy remove +``` + - + + 첫 배포는 보통 **약 1분** 정도 걸립니다. 전체 사전 요건과 웹 UI 절차는 [AMP에 배포](/ko/enterprise/guides/deploy-to-amp)를 참고하세요. + - - CrewAI AOP로 시작하여 몇 번의 클릭만으로 production 환경에 crew를 - 배포하세요. + + AMP 배포 단계별 안내(CLI 및 대시보드). - 오픈 소스 커뮤니티에 참여하여 아이디어를 나누고, 프로젝트를 공유하며, 다른 - CrewAI 개발자들과 소통하세요. 
+ 아이디어를 나누고 프로젝트를 공유하며 다른 CrewAI 개발자와 소통하세요. diff --git a/docs/pt-BR/enterprise/guides/deploy-to-amp.mdx b/docs/pt-BR/enterprise/guides/deploy-to-amp.mdx index 7d469b993..db70a2711 100644 --- a/docs/pt-BR/enterprise/guides/deploy-to-amp.mdx +++ b/docs/pt-BR/enterprise/guides/deploy-to-amp.mdx @@ -105,7 +105,7 @@ A CLI detecta automaticamente o tipo do seu projeto a partir do `pyproject.toml` ``` - A primeira implantação normalmente leva de 10 a 15 minutos, pois as imagens dos containers são construídas. As próximas implantações são bem mais rápidas. + A primeira implantação normalmente leva cerca de 1 minuto. @@ -187,7 +187,7 @@ Você precisa enviar seu crew para um repositório do GitHub. Caso ainda não te 1. Clique no botão "Deploy" para iniciar o processo de implantação 2. Você pode monitorar o progresso pela barra de progresso - 3. A primeira implantação geralmente demora de 10 a 15 minutos; as próximas serão mais rápidas + 3. A primeira implantação geralmente demora cerca de 1 minuto ![Progresso da Implantação](/images/enterprise/deploy-progress.png) diff --git a/docs/pt-BR/installation.mdx b/docs/pt-BR/installation.mdx index ffb2bbcaf..868778af8 100644 --- a/docs/pt-BR/installation.mdx +++ b/docs/pt-BR/installation.mdx @@ -200,12 +200,11 @@ Para equipes e organizações, o CrewAI oferece opções de implantação corpor - Siga nosso guia de início rápido para criar seu primeiro agente CrewAI e - obter experiência prática. + Siga o guia rápido para gerar um Flow, executar um crew com um agente e produzir um relatório. - Siga nosso guia rápido para criar seu primeiro agente CrewAI e colocar a mão na massa. + Gere um Flow, execute um crew com um agente e produza um relatório ponta a ponta. -## Construa seu primeiro Agente CrewAI +Neste guia você vai **criar um Flow** que define um tópico de pesquisa, executa um **crew com um agente** (um pesquisador com busca na web) e termina com um **relatório em Markdown** no disco. 
Flows são a forma recomendada de estruturar apps em produção: eles controlam **estado** e **ordem de execução**, enquanto os **agentes** fazem o trabalho dentro da etapa do crew. -Vamos criar uma tripulação simples que nos ajudará a `pesquisar` e `relatar` sobre os `últimos avanços em IA` para um determinado tópico ou assunto. +Se ainda não instalou o CrewAI, siga primeiro o [guia de instalação](/pt-BR/installation). -Antes de prosseguir, certifique-se de ter concluído a instalação da CrewAI. -Se ainda não instalou, faça isso seguindo o [guia de instalação](/pt-BR/installation). +## Pré-requisitos -Siga os passos abaixo para começar a tripular! 🚣‍♂️ +- Ambiente Python e a CLI do CrewAI (veja [instalação](/pt-BR/installation)) +- Um LLM configurado com as chaves corretas — veja [LLMs](/pt-BR/concepts/llms#setting-up-your-llm) +- Uma chave de API do [Serper.dev](https://serper.dev/) (`SERPER_API_KEY`) para busca na web neste tutorial + +## Construa seu primeiro Flow - - Crie um novo projeto de tripulação executando o comando abaixo em seu terminal. - Isso criará um novo diretório chamado `latest-ai-development` com a estrutura básica para sua tripulação. + + No terminal, gere um projeto Flow (o nome da pasta usa sublinhados, ex.: `latest_ai_flow`): + ```shell Terminal - crewai create crew latest-ai-development + crewai create flow latest-ai-flow + cd latest_ai_flow ``` + + Isso cria um app Flow em `src/latest_ai_flow/`, incluindo um crew inicial em `crews/content_crew/` que você substituirá por um crew de pesquisa **com um único agente** nos próximos passos. - - - ```shell Terminal - cd latest_ai_development - ``` - - - - - Você também pode modificar os agentes conforme necessário para atender ao seu caso de uso ou copiar e colar como está para seu projeto. - Qualquer variável interpolada nos seus arquivos `agents.yaml` e `tasks.yaml`, como `{topic}`, será substituída pelo valor da variável no arquivo `main.py`. 
- + + + Substitua o conteúdo de `src/latest_ai_flow/crews/content_crew/config/agents.yaml` por um único pesquisador. Variáveis como `{topic}` são preenchidas a partir de `crew.kickoff(inputs=...)`. + ```yaml agents.yaml - # src/latest_ai_development/config/agents.yaml + # src/latest_ai_flow/crews/content_crew/config/agents.yaml researcher: role: > - Pesquisador Sênior de Dados em {topic} + Pesquisador(a) Sênior de Dados em {topic} goal: > - Descobrir os avanços mais recentes em {topic} + Descobrir os desenvolvimentos mais recentes em {topic} backstory: > - Você é um pesquisador experiente com talento para descobrir os últimos avanços em {topic}. Conhecido por sua habilidade em encontrar as informações mais relevantes e apresentá-las de forma clara e concisa. - - reporting_analyst: - role: > - Analista de Relatórios em {topic} - goal: > - Criar relatórios detalhados com base na análise de dados e descobertas de pesquisa em {topic} - backstory: > - Você é um analista meticuloso com um olhar atento aos detalhes. É conhecido por sua capacidade de transformar dados complexos em relatórios claros e concisos, facilitando o entendimento e a tomada de decisão por parte dos outros. + Você é um pesquisador experiente que descobre os últimos avanços em {topic}. + Encontra as informações mais relevantes e apresenta tudo com clareza. ``` - + + ```yaml tasks.yaml - # src/latest_ai_development/config/tasks.yaml + # src/latest_ai_flow/crews/content_crew/config/tasks.yaml research_task: description: > - Realize uma pesquisa aprofundada sobre {topic}. - Certifique-se de encontrar informações interessantes e relevantes considerando que o ano atual é 2025. + Faça uma pesquisa aprofundada sobre {topic}. Use busca na web para obter + informações atuais e confiáveis. O ano atual é 2026. 
expected_output: > - Uma lista com 10 tópicos dos dados mais relevantes sobre {topic} + Um relatório em markdown com seções claras: tendências principais, ferramentas + ou empresas relevantes e implicações. Entre 800 e 1200 palavras. Sem cercas de código em volta do documento inteiro. agent: researcher - - reporting_task: - description: > - Revise o contexto obtido e expanda cada tópico em uma seção completa para um relatório. - Certifique-se de que o relatório seja detalhado e contenha todas as informações relevantes. - expected_output: > - Um relatório completo com os principais tópicos, cada um com uma seção detalhada de informações. - Formate como markdown sem usar '```' - agent: reporting_analyst - output_file: report.md + output_file: output/report.md ``` - - ```python crew.py - # src/latest_ai_development/crew.py - from crewai import Agent, Crew, Process, Task - from crewai.project import CrewBase, agent, crew, task - from crewai_tools import SerperDevTool - from crewai.agents.agent_builder.base_agent import BaseAgent + + + Aponte o crew gerado para o YAML e anexe `SerperDevTool` ao pesquisador. 
+ + ```python content_crew.py + # src/latest_ai_flow/crews/content_crew/content_crew.py from typing import List + from crewai import Agent, Crew, Process, Task + from crewai.agents.agent_builder.base_agent import BaseAgent + from crewai.project import CrewBase, agent, crew, task + from crewai_tools import SerperDevTool + + @CrewBase - class LatestAiDevelopmentCrew(): - """LatestAiDevelopment crew""" + class ResearchCrew: + """Crew de pesquisa com um agente, usado dentro do Flow.""" agents: List[BaseAgent] tasks: List[Task] + agents_config = "config/agents.yaml" + tasks_config = "config/tasks.yaml" + @agent def researcher(self) -> Agent: return Agent( - config=self.agents_config['researcher'], # type: ignore[index] + config=self.agents_config["researcher"], # type: ignore[index] verbose=True, - tools=[SerperDevTool()] - ) - - @agent - def reporting_analyst(self) -> Agent: - return Agent( - config=self.agents_config['reporting_analyst'], # type: ignore[index] - verbose=True + tools=[SerperDevTool()], ) @task def research_task(self) -> Task: return Task( - config=self.tasks_config['research_task'], # type: ignore[index] - ) - - @task - def reporting_task(self) -> Task: - return Task( - config=self.tasks_config['reporting_task'], # type: ignore[index] - output_file='output/report.md' # Este é o arquivo que conterá o relatório final. 
+ config=self.tasks_config["research_task"], # type: ignore[index] ) @crew def crew(self) -> Crew: - """Creates the LatestAiDevelopment crew""" return Crew( - agents=self.agents, # Criado automaticamente pelo decorador @agent - tasks=self.tasks, # Criado automaticamente pelo decorador @task + agents=self.agents, + tasks=self.tasks, process=Process.sequential, verbose=True, ) ``` - - ```python crew.py - # src/latest_ai_development/crew.py - from crewai import Agent, Crew, Process, Task - from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff - from crewai_tools import SerperDevTool - @CrewBase - class LatestAiDevelopmentCrew(): - """LatestAiDevelopment crew""" + + Conecte o crew a um Flow: um passo `@start()` define o tópico no **estado** e um `@listen` executa o crew. O `output_file` da tarefa continua gravando `output/report.md`. - @before_kickoff - def before_kickoff_function(self, inputs): - print(f"Before kickoff function with inputs: {inputs}") - return inputs # You can return the inputs or modify them as needed - - @after_kickoff - def after_kickoff_function(self, result): - print(f"After kickoff function with result: {result}") - return result # You can return the result or modify it as needed - - # ... remaining code - ``` - - - - Por exemplo, você pode passar o input `topic` para sua tripulação para personalizar a pesquisa e o relatório. ```python main.py - #!/usr/bin/env python - # src/latest_ai_development/main.py - import sys - from latest_ai_development.crew import LatestAiDevelopmentCrew + # src/latest_ai_flow/main.py + from pydantic import BaseModel - def run(): - """ - Run the crew. 
- """ - inputs = { - 'topic': 'AI Agents' - } - LatestAiDevelopmentCrew().crew().kickoff(inputs=inputs) + from crewai.flow import Flow, listen, start + + from latest_ai_flow.crews.content_crew.content_crew import ResearchCrew + + + class ResearchFlowState(BaseModel): + topic: str = "" + report: str = "" + + + class LatestAiFlow(Flow[ResearchFlowState]): + @start() + def prepare_topic(self, crewai_trigger_payload: dict | None = None): + if crewai_trigger_payload: + self.state.topic = crewai_trigger_payload.get("topic", "AI Agents") + else: + self.state.topic = "AI Agents" + print(f"Tópico: {self.state.topic}") + + @listen(prepare_topic) + def run_research(self): + result = ResearchCrew().crew().kickoff(inputs={"topic": self.state.topic}) + self.state.report = result.raw + print("Crew de pesquisa concluído.") + + @listen(run_research) + def summarize(self): + print("Relatório em: output/report.md") + + + def kickoff(): + LatestAiFlow().kickoff() + + + def plot(): + LatestAiFlow().plot() + + + if __name__ == "__main__": + kickoff() ``` - - - Antes de executar sua tripulação, certifique-se de ter as seguintes chaves configuradas como variáveis de ambiente no seu arquivo `.env`: - - Uma chave da API do [Serper.dev](https://serper.dev/): `SERPER_API_KEY=YOUR_KEY_HERE` - - A configuração do modelo de sua escolha, como uma chave de API. Veja o - [guia de configuração do LLM](/pt-BR/concepts/llms#setting-up-your-llm) para aprender como configurar modelos de qualquer provedor. - - - - Trave e instale as dependências utilizando o comando da CLI: - - ```shell Terminal - crewai install - ``` - - - Se quiser instalar pacotes adicionais, faça isso executando: - - ```shell Terminal - uv add - ``` - - - - - Para executar sua tripulação, rode o seguinte comando na raiz do projeto: - - ```bash Terminal - crewai run - ``` - + + Se o nome do pacote não for `latest_ai_flow`, ajuste o import de `ResearchCrew` para o caminho de módulo do seu projeto. 
+ - - Para usuários do CrewAI AMP, você pode criar a mesma tripulação sem escrever código: + + Na raiz do projeto, no arquivo `.env`, defina: -1. Faça login na sua conta CrewAI AMP (crie uma conta gratuita em [app.crewai.com](https://app.crewai.com)) -2. Abra o Crew Studio -3. Digite qual automação deseja construir -4. Crie suas tarefas visualmente e conecte-as em sequência -5. Configure seus inputs e clique em "Download Code" ou "Deploy" - -![Crew Studio Quickstart](/images/enterprise/crew-studio-interface.png) - - - Comece sua conta gratuita no CrewAI AMP - + - `SERPER_API_KEY` — obtida em [Serper.dev](https://serper.dev/) + - As chaves do provedor de modelo conforme necessário — veja [configuração de LLM](/pt-BR/concepts/llms#setting-up-your-llm) - - Você verá a saída no console e o arquivo `report.md` deve ser criado na raiz do seu projeto com o relatório final. -Veja um exemplo de como o relatório deve ser: + + + ```shell Terminal + crewai install + crewai run + ``` + + + O `crewai run` executa o ponto de entrada do Flow definido no projeto (o mesmo comando dos crews; o tipo do projeto é `"flow"` no `pyproject.toml`). + + + + Você deve ver logs do Flow e do crew. Abra **`output/report.md`** para o relatório gerado (trecho): ```markdown output/report.md - # Relatório Abrangente sobre a Ascensão e o Impacto dos Agentes de IA em 2025 + # Agentes de IA em 2026: panorama e tendências - ## 1. Introduction to AI Agents - In 2025, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce. + ## Resumo executivo + … - ## 2. 
Benefits of AI Agents - AI agents bring numerous advantages that are transforming traditional work environments. Key benefits include: + ## Principais tendências + - **Uso de ferramentas e orquestração** — … + - **Adoção empresarial** — … - - **Task Automation**: AI agents can carry out repetitive tasks such as data entry, scheduling, and payroll processing without human intervention, greatly reducing the time and resources spent on these activities. - - **Improved Efficiency**: By quickly processing large datasets and performing analyses that would take humans significantly longer, AI agents enhance operational efficiency. This allows teams to focus on strategic tasks that require higher-level thinking. - - **Enhanced Decision-Making**: AI agents can analyze trends and patterns in data, provide insights, and even suggest actions, helping stakeholders make informed decisions based on factual data rather than intuition alone. - - ## 3. Popular AI Agent Frameworks - Several frameworks have emerged to facilitate the development of AI agents, each with its own unique features and capabilities. Some of the most popular frameworks include: - - - **Autogen**: A framework designed to streamline the development of AI agents through automation of code generation. - - **Semantic Kernel**: Focuses on natural language processing and understanding, enabling agents to comprehend user intentions better. - - **Promptflow**: Provides tools for developers to create conversational agents that can navigate complex interactions seamlessly. - - **Langchain**: Specializes in leveraging various APIs to ensure agents can access and utilize external data effectively. - - **CrewAI**: Aimed at collaborative environments, CrewAI strengthens teamwork by facilitating communication through AI-driven insights. - - **MemGPT**: Combines memory-optimized architectures with generative capabilities, allowing for more personalized interactions with users. 
- - These frameworks empower developers to build versatile and intelligent agents that can engage users, perform advanced analytics, and execute various tasks aligned with organizational goals. - - ## 4. AI Agents in Human Resources - AI agents are revolutionizing HR practices by automating and optimizing key functions: - - - **Recruiting**: AI agents can screen resumes, schedule interviews, and even conduct initial assessments, thus accelerating the hiring process while minimizing biases. - - **Succession Planning**: AI systems analyze employee performance data and potential, helping organizations identify future leaders and plan appropriate training. - - **Employee Engagement**: Chatbots powered by AI can facilitate feedback loops between employees and management, promoting an open culture and addressing concerns promptly. - - As AI continues to evolve, HR departments leveraging these agents can realize substantial improvements in both efficiency and employee satisfaction. - - ## 5. AI Agents in Finance - The finance sector is seeing extensive integration of AI agents that enhance financial practices: - - - **Expense Tracking**: Automated systems manage and monitor expenses, flagging anomalies and offering recommendations based on spending patterns. - - **Risk Assessment**: AI models assess credit risk and uncover potential fraud by analyzing transaction data and behavioral patterns. - - **Investment Decisions**: AI agents provide stock predictions and analytics based on historical data and current market conditions, empowering investors with informative insights. - - The incorporation of AI agents into finance is fostering a more responsive and risk-aware financial landscape. - - ## 6. Market Trends and Investments - The growth of AI agents has attracted significant investment, especially amidst the rising popularity of chatbots and generative AI technologies. 
Companies and entrepreneurs are eager to explore the potential of these systems, recognizing their ability to streamline operations and improve customer engagement. - - Conversely, corporations like Microsoft are taking strides to integrate AI agents into their product offerings, with enhancements to their Copilot 365 applications. This strategic move emphasizes the importance of AI literacy in the modern workplace and indicates the stabilizing of AI agents as essential business tools. - - ## 7. Future Predictions and Implications - Experts predict that AI agents will transform essential aspects of work life. As we look toward the future, several anticipated changes include: - - - Enhanced integration of AI agents across all business functions, creating interconnected systems that leverage data from various departmental silos for comprehensive decision-making. - - Continued advancement of AI technologies, resulting in smarter, more adaptable agents capable of learning and evolving from user interactions. - - Increased regulatory scrutiny to ensure ethical use, especially concerning data privacy and employee surveillance as AI agents become more prevalent. - - To stay competitive and harness the full potential of AI agents, organizations must remain vigilant about latest developments in AI technology and consider continuous learning and adaptation in their strategic planning. - - ## 8. Conclusion - The emergence of AI agents is undeniably reshaping the workplace landscape in 5. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment. + ## Implicações + … ``` - + + O arquivo real será mais longo e refletirá resultados de busca ao vivo. +## Como isso se encaixa + +1. **Flow** — `LatestAiFlow` executa `prepare_topic`, depois `run_research`, depois `summarize`. 
O estado (`topic`, `report`) fica no Flow. +2. **Crew** — `ResearchCrew` executa uma tarefa com um agente: o pesquisador usa **Serper** na web e escreve o relatório. +3. **Artefato** — O `output_file` da tarefa grava o relatório em `output/report.md`. + +Para ir além em Flows (roteamento, persistência, human-in-the-loop), veja [Construa seu primeiro Flow](/pt-BR/guides/flows/first-flow) e [Flows](/pt-BR/concepts/flows). Para crews sem Flow, veja [Crews](/pt-BR/concepts/crews). Para um único `Agent` com `kickoff()` sem tarefas, veja [Agents](/pt-BR/concepts/agents#direct-agent-interaction-with-kickoff). + -Parabéns! - -Você configurou seu projeto de tripulação com sucesso e está pronto para começar a construir seus próprios fluxos de trabalho baseados em agentes! - +Você tem um Flow ponta a ponta com um crew de agente e um relatório salvo — uma base sólida para novas etapas, crews ou ferramentas. -### Observação sobre Consistência nos Nomes +### Consistência de nomes -Os nomes utilizados nos seus arquivos YAML (`agents.yaml` e `tasks.yaml`) devem corresponder aos nomes dos métodos no seu código Python. -Por exemplo, você pode referenciar o agente para tarefas específicas a partir do arquivo `tasks.yaml`. -Essa consistência de nomes permite que a CrewAI conecte automaticamente suas configurações ao seu código; caso contrário, sua tarefa não reconhecerá a referência corretamente. +As chaves do YAML (`researcher`, `research_task`) devem coincidir com os nomes dos métodos na classe `@CrewBase`. Veja [Crews](/pt-BR/concepts/crews) para o padrão completo com decoradores. -#### Exemplos de Referências +## Implantação - - Observe como usamos o mesmo nome para o agente no arquivo `agents.yaml` - (`email_summarizer`) e no método do arquivo `crew.py` (`email_summarizer`). - +Envie seu Flow para o **[CrewAI AMP](https://app.crewai.com)** quando rodar localmente e o projeto estiver em um repositório **GitHub**. 
Na raiz do projeto: -```yaml agents.yaml -email_summarizer: - role: > - Email Summarizer - goal: > - Summarize emails into a concise and clear summary - backstory: > - You will create a 5 bullet point summary of the report - llm: provider/model-id # Add your choice of model here + +```bash Autenticar +crewai login ``` - - Observe como usamos o mesmo nome para a tarefa no arquivo `tasks.yaml` - (`email_summarizer_task`) e no método no arquivo `crew.py` - (`email_summarizer_task`). - - -```yaml tasks.yaml -email_summarizer_task: - description: > - Summarize the email into a 5 bullet point summary - expected_output: > - A 5 bullet point summary of the email - agent: email_summarizer - context: - - reporting_task - - research_task +```bash Criar implantação +crewai deploy create ``` -## Fazendo o Deploy da Sua Tripulação +```bash Status e logs +crewai deploy status +crewai deploy logs +``` -A forma mais fácil de fazer deploy da sua tripulação em produção é através da [CrewAI AMP](http://app.crewai.com). +```bash Enviar atualizações após mudanças no código +crewai deploy push +``` -Assista a este vídeo tutorial para uma demonstração detalhada de como fazer deploy da sua tripulação na [CrewAI AMP](http://app.crewai.com) usando a CLI. +```bash Listar ou remover implantações +crewai deploy list +crewai deploy remove +``` + - + + A primeira implantação costuma levar **cerca de 1 minuto**. Pré-requisitos completos e fluxo na interface web estão em [Implantar no AMP](/pt-BR/enterprise/guides/deploy-to-amp). + - - Comece com o CrewAI AMP e faça o deploy da sua tripulação em ambiente de - produção com apenas alguns cliques. + + AMP passo a passo (CLI e painel). - Participe da nossa comunidade open source para discutir ideias, compartilhar - seus projetos e conectar-se com outros desenvolvedores CrewAI. + Troque ideias, compartilhe projetos e conecte-se com outros desenvolvedores CrewAI. 
From ad24c3d56e4484d0703b88517f0564aebfff21da Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Mon, 6 Apr 2026 19:52:53 -0300 Subject: [PATCH 06/21] feat: add guardrail_type and name to distinguish traces (#5303) * feat: add guardrail_type to distinguish between hallucination, function, and LLM * feat: introduce guardrail_name into guardrail events * feat: propagate guardrail type and name on guardrail completed event * feat: remove unused LLMGuardrailFailedEvent * fix: handle running event loop in LLMGuardrail._validate_output When agent.kickoff() returns a coroutine inside an already-running event loop, asyncio.run() fails --- .../events/types/llm_guardrail_events.py | 25 ++++++++----------- lib/crewai/src/crewai/tasks/llm_guardrail.py | 19 +++++++++++++- lib/crewai/src/crewai/utilities/guardrail.py | 16 ++++++------ 3 files changed, 37 insertions(+), 23 deletions(-) diff --git a/lib/crewai/src/crewai/events/types/llm_guardrail_events.py b/lib/crewai/src/crewai/events/types/llm_guardrail_events.py index 8bbcf6e0b..6056059dc 100644 --- a/lib/crewai/src/crewai/events/types/llm_guardrail_events.py +++ b/lib/crewai/src/crewai/events/types/llm_guardrail_events.py @@ -12,6 +12,8 @@ class LLMGuardrailBaseEvent(BaseEvent): from_agent: Any | None = None agent_role: str | None = None agent_id: str | None = None + guardrail_type: str | None = None + guardrail_name: str | None = None def __init__(self, **data: Any) -> None: super().__init__(**data) @@ -37,9 +39,17 @@ class LLMGuardrailStartedEvent(LLMGuardrailBaseEvent): super().__init__(**data) - if isinstance(self.guardrail, (LLMGuardrail, HallucinationGuardrail)): + if isinstance(self.guardrail, HallucinationGuardrail): + self.guardrail_type = "hallucination" + self.guardrail_name = self.guardrail.description.strip() + self.guardrail = self.guardrail.description.strip() + elif isinstance(self.guardrail, LLMGuardrail): + self.guardrail_type = "llm" + self.guardrail_name = self.guardrail.description.strip() self.guardrail 
= self.guardrail.description.strip() elif callable(self.guardrail): + self.guardrail_type = "function" + self.guardrail_name = getattr(self.guardrail, "__name__", None) self.guardrail = getsource(self.guardrail).strip() @@ -58,16 +68,3 @@ class LLMGuardrailCompletedEvent(LLMGuardrailBaseEvent): result: Any error: str | None = None retry_count: int - - -class LLMGuardrailFailedEvent(LLMGuardrailBaseEvent): - """Event emitted when a guardrail task fails - - Attributes: - error: The error message - retry_count: The number of times the guardrail has been retried - """ - - type: Literal["llm_guardrail_failed"] = "llm_guardrail_failed" - error: str - retry_count: int diff --git a/lib/crewai/src/crewai/tasks/llm_guardrail.py b/lib/crewai/src/crewai/tasks/llm_guardrail.py index 3729e8084..3cbd20c65 100644 --- a/lib/crewai/src/crewai/tasks/llm_guardrail.py +++ b/lib/crewai/src/crewai/tasks/llm_guardrail.py @@ -1,5 +1,7 @@ import asyncio +import concurrent.futures from collections.abc import Coroutine +import contextvars import inspect from typing import Any @@ -19,6 +21,21 @@ def _is_coroutine( return inspect.iscoroutine(obj) +def _run_coroutine_sync(coro: Coroutine[Any, Any, LiteAgentOutput]) -> LiteAgentOutput: + """Run a coroutine synchronously, handling an already-running event loop.""" + try: + asyncio.get_running_loop() + has_running_loop = True + except RuntimeError: + has_running_loop = False + + if has_running_loop: + ctx = contextvars.copy_context() + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool: + return pool.submit(ctx.run, asyncio.run, coro).result() + return asyncio.run(coro) + + class LLMGuardrailResult(BaseModel): valid: bool = Field( description="Whether the task output complies with the guardrail" @@ -75,7 +92,7 @@ class LLMGuardrail: kickoff_result = agent.kickoff(query, response_format=LLMGuardrailResult) if _is_coroutine(kickoff_result): - return asyncio.run(kickoff_result) + return _run_coroutine_sync(kickoff_result) return 
kickoff_result def __call__(self, task_output: TaskOutput) -> tuple[bool, Any]: diff --git a/lib/crewai/src/crewai/utilities/guardrail.py b/lib/crewai/src/crewai/utilities/guardrail.py index 3c50daef6..b9828cfba 100644 --- a/lib/crewai/src/crewai/utilities/guardrail.py +++ b/lib/crewai/src/crewai/utilities/guardrail.py @@ -118,15 +118,13 @@ def process_guardrail( LLMGuardrailStartedEvent, ) - crewai_event_bus.emit( - event_source, - LLMGuardrailStartedEvent( - guardrail=guardrail, - retry_count=retry_count, - from_agent=from_agent, - from_task=from_task, - ), + started_event = LLMGuardrailStartedEvent( + guardrail=guardrail, + retry_count=retry_count, + from_agent=from_agent, + from_task=from_task, ) + crewai_event_bus.emit(event_source, started_event) result = guardrail(output) guardrail_result = GuardrailResult.from_tuple(result) @@ -138,6 +136,8 @@ def process_guardrail( result=guardrail_result.result, error=guardrail_result.error, retry_count=retry_count, + guardrail_type=started_event.guardrail_type, + guardrail_name=started_event.guardrail_name, from_agent=from_agent, from_task=from_task, ), From c132d57a36d6a2e3911c948828f636791ee5b01c Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Tue, 7 Apr 2026 09:35:26 +0800 Subject: [PATCH 07/21] perf: use JSONB for checkpoint data column --- lib/crewai/src/crewai/state/provider/sqlite_provider.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/crewai/src/crewai/state/provider/sqlite_provider.py b/lib/crewai/src/crewai/state/provider/sqlite_provider.py index 51f7096d2..7a1d89399 100644 --- a/lib/crewai/src/crewai/state/provider/sqlite_provider.py +++ b/lib/crewai/src/crewai/state/provider/sqlite_provider.py @@ -16,12 +16,12 @@ _CREATE_TABLE = """ CREATE TABLE IF NOT EXISTS checkpoints ( id TEXT PRIMARY KEY, created_at TEXT NOT NULL, - data TEXT NOT NULL + data JSONB NOT NULL ) """ -_INSERT = "INSERT INTO checkpoints (id, created_at, data) VALUES (?, ?, ?)" -_SELECT = "SELECT data FROM 
checkpoints WHERE id = ?" +_INSERT = "INSERT INTO checkpoints (id, created_at, data) VALUES (?, ?, jsonb(?))" +_SELECT = "SELECT json(data) FROM checkpoints WHERE id = ?" _PRUNE = """ DELETE FROM checkpoints WHERE rowid NOT IN ( SELECT rowid FROM checkpoints ORDER BY rowid DESC LIMIT ? From e64b37c5fc8336878d6f555a8552ad9a9ea95837 Mon Sep 17 00:00:00 2001 From: alex-clawd Date: Mon, 6 Apr 2026 23:59:40 -0700 Subject: [PATCH 08/21] refactor: remove CodeInterpreterTool and deprecate code execution params (#5309) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor: remove CodeInterpreterTool and deprecate code execution params CodeInterpreterTool has been removed. The allow_code_execution and code_execution_mode parameters on Agent are deprecated and will be removed in v2.0. Use dedicated sandbox services (E2B, Modal, etc.) for code execution needs. Changes: - Remove CodeInterpreterTool from crewai-tools (tool, Dockerfile, tests, imports) - Remove docker dependency from crewai-tools - Deprecate allow_code_execution and code_execution_mode on Agent - get_code_execution_tools() returns empty list with deprecation warning - _validate_docker_installation() is a no-op with deprecation warning - Bedrock CodeInterpreter (AWS hosted) and OpenAI code_interpreter are NOT affected * fix: remove empty code_interpreter imports and unused stdlib imports - Remove empty `from code_interpreter_tool import ()` blocks in both crewai_tools/__init__.py and tools/__init__.py that caused SyntaxError after CodeInterpreterTool was removed - Remove unused `shutil` and `subprocess` imports from agent/core.py left over from the code execution params deprecation Co-Authored-By: Claude Sonnet 4.6 * fix: remove redundant _validate_docker_installation call and fix list type annotation - Drop the _validate_docker_installation() call inside the allow_code_execution block — it fired a second DeprecationWarning identical to the one emitted just above it, 
making the warning fire twice. - Annotate get_code_execution_tools() return type as list[Any] to satisfy mypy (bare `list` fails the type-arg check introduced by this branch). Co-Authored-By: Claude Sonnet 4.6 * ci: retrigger * fix: update test_crew.py to remove CodeInterpreterTool references CodeInterpreterTool was removed from crewai_tools. Update tests to reflect that get_code_execution_tools() now returns an empty list. Co-Authored-By: Claude Sonnet 4.6 * chore: update tool specifications --------- Co-authored-by: Claude Sonnet 4.6 Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- lib/crewai-tools/pyproject.toml | 1 - lib/crewai-tools/src/crewai_tools/__init__.py | 4 - .../src/crewai_tools/tools/__init__.py | 4 - .../tools/code_interpreter_tool/Dockerfile | 6 - .../tools/code_interpreter_tool/README.md | 95 -- .../tools/code_interpreter_tool/__init__.py | 0 .../code_interpreter_tool.py | 424 --------- .../tests/tools/test_code_interpreter_tool.py | 253 ------ lib/crewai-tools/tool.specs.json | 841 +++++++++++++++--- lib/crewai/src/crewai/agent/core.py | 71 +- lib/crewai/tests/test_crew.py | 17 +- 11 files changed, 727 insertions(+), 989 deletions(-) delete mode 100644 lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/Dockerfile delete mode 100644 lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/README.md delete mode 100644 lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/__init__.py delete mode 100644 lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py delete mode 100644 lib/crewai-tools/tests/tools/test_code_interpreter_tool.py diff --git a/lib/crewai-tools/pyproject.toml b/lib/crewai-tools/pyproject.toml index 0996a58fe..9fa051003 100644 --- a/lib/crewai-tools/pyproject.toml +++ b/lib/crewai-tools/pyproject.toml @@ -10,7 +10,6 @@ requires-python = ">=3.10, <3.14" dependencies = [ "pytube~=15.0.0", "requests~=2.32.5", - "docker~=7.1.0", 
"crewai==1.14.0a3", "tiktoken~=0.8.0", "beautifulsoup4~=4.13.4", diff --git a/lib/crewai-tools/src/crewai_tools/__init__.py b/lib/crewai-tools/src/crewai_tools/__init__.py index 372b683e8..bdaa0499b 100644 --- a/lib/crewai-tools/src/crewai_tools/__init__.py +++ b/lib/crewai-tools/src/crewai_tools/__init__.py @@ -35,9 +35,6 @@ from crewai_tools.tools.browserbase_load_tool.browserbase_load_tool import ( from crewai_tools.tools.code_docs_search_tool.code_docs_search_tool import ( CodeDocsSearchTool, ) -from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import ( - CodeInterpreterTool, -) from crewai_tools.tools.composio_tool.composio_tool import ComposioTool from crewai_tools.tools.contextualai_create_agent_tool.contextual_create_agent_tool import ( ContextualAICreateAgentTool, @@ -225,7 +222,6 @@ __all__ = [ "BrowserbaseLoadTool", "CSVSearchTool", "CodeDocsSearchTool", - "CodeInterpreterTool", "ComposioTool", "ContextualAICreateAgentTool", "ContextualAIParseTool", diff --git a/lib/crewai-tools/src/crewai_tools/tools/__init__.py b/lib/crewai-tools/src/crewai_tools/tools/__init__.py index 56e77ffe4..d3c1da664 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/__init__.py +++ b/lib/crewai-tools/src/crewai_tools/tools/__init__.py @@ -24,9 +24,6 @@ from crewai_tools.tools.browserbase_load_tool.browserbase_load_tool import ( from crewai_tools.tools.code_docs_search_tool.code_docs_search_tool import ( CodeDocsSearchTool, ) -from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import ( - CodeInterpreterTool, -) from crewai_tools.tools.composio_tool.composio_tool import ComposioTool from crewai_tools.tools.contextualai_create_agent_tool.contextual_create_agent_tool import ( ContextualAICreateAgentTool, @@ -210,7 +207,6 @@ __all__ = [ "BrowserbaseLoadTool", "CSVSearchTool", "CodeDocsSearchTool", - "CodeInterpreterTool", "ComposioTool", "ContextualAICreateAgentTool", "ContextualAIParseTool", diff --git 
a/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/Dockerfile b/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/Dockerfile deleted file mode 100644 index 4df22ca58..000000000 --- a/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM python:3.12-alpine - -RUN pip install requests beautifulsoup4 - -# Set the working directory -WORKDIR /workspace diff --git a/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/README.md b/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/README.md deleted file mode 100644 index 278b71067..000000000 --- a/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# CodeInterpreterTool - -## Description -This tool is used to give the Agent the ability to run code (Python3) from the code generated by the Agent itself. The code is executed in a Docker container for secure isolation. - -It is incredibly useful since it allows the Agent to generate code, run it in an isolated environment, get the result and use it to make decisions. - -## ⚠️ Security Requirements - -**Docker is REQUIRED** for safe code execution. The tool will refuse to execute code without Docker to prevent security vulnerabilities. - -### Why Docker is Required - -Previous versions included a "restricted sandbox" fallback when Docker was unavailable. This has been **removed** due to critical security vulnerabilities: - -- The Python-based sandbox could be escaped via object introspection -- Attackers could recover the original `__import__` function and access any module -- This allowed arbitrary command execution on the host system - -**Docker provides real process isolation** and is the only secure way to execute untrusted code. 
- -## Requirements - -- **Docker (REQUIRED)** - Install from [docker.com](https://docs.docker.com/get-docker/) - -## Installation -Install the crewai_tools package -```shell -pip install 'crewai[tools]' -``` - -## Example - -Remember that when using this tool, the code must be generated by the Agent itself. The code must be Python3 code. It will take some time the first time to run because it needs to build the Docker image. - -### Basic Usage (Docker Container - Recommended) - -```python -from crewai_tools import CodeInterpreterTool - -Agent( - ... - tools=[CodeInterpreterTool()], -) -``` - -### Custom Dockerfile - -If you need to pass your own Dockerfile: - -```python -from crewai_tools import CodeInterpreterTool - -Agent( - ... - tools=[CodeInterpreterTool(user_dockerfile_path="")], -) -``` - -### Manual Docker Host Configuration - -If it is difficult to connect to the Docker daemon automatically (especially for macOS users), you can set up the Docker host manually: - -```python -from crewai_tools import CodeInterpreterTool - -Agent( - ... - tools=[CodeInterpreterTool( - user_docker_base_url="", - user_dockerfile_path="" - )], -) -``` - -### Unsafe Mode (NOT RECOMMENDED) - -If you absolutely cannot use Docker and **fully trust the code source**, you can use unsafe mode: - -```python -from crewai_tools import CodeInterpreterTool - -# WARNING: Only use with fully trusted code! -Agent( - ... - tools=[CodeInterpreterTool(unsafe_mode=True)], -) -``` - -**⚠️ SECURITY WARNING:** `unsafe_mode=True` executes code directly on the host without any isolation. Only use this if: -- You completely trust the code being executed -- You understand the security risks -- You cannot install Docker in your environment - -For production use, **always use Docker** (the default mode). 
diff --git a/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/__init__.py b/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py deleted file mode 100644 index 9ad969966..000000000 --- a/lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ /dev/null @@ -1,424 +0,0 @@ -"""Code Interpreter Tool for executing Python code in isolated environments. - -This module provides a tool for executing Python code either in a Docker container for -safe isolation or directly in a restricted sandbox. It includes mechanisms for blocking -potentially unsafe operations and importing restricted modules. -""" - -import importlib.util -import os -import subprocess -import sys -from types import ModuleType -from typing import Any, ClassVar, TypedDict - -from crewai.tools import BaseTool -from docker import ( # type: ignore[import-untyped] - DockerClient, - from_env as docker_from_env, -) -from docker.errors import ImageNotFound, NotFound # type: ignore[import-untyped] -from pydantic import BaseModel, Field -from typing_extensions import Unpack - -from crewai_tools.printer import Printer - - -class RunKwargs(TypedDict, total=False): - """Keyword arguments for the _run method.""" - - code: str - libraries_used: list[str] - - -class CodeInterpreterSchema(BaseModel): - """Schema for defining inputs to the CodeInterpreterTool. - - This schema defines the required parameters for code execution, - including the code to run and any libraries that need to be installed. - """ - - code: str = Field( - ..., - description="Python3 code used to be interpreted in the Docker container. 
ALWAYS PRINT the final result and the output of the code", - ) - - libraries_used: list[str] = Field( - ..., - description="List of libraries used in the code with proper installing names separated by commas. Example: numpy,pandas,beautifulsoup4", - ) - - -class SandboxPython: - """INSECURE: A restricted Python execution environment with known vulnerabilities. - - WARNING: This class does NOT provide real security isolation and is vulnerable to - sandbox escape attacks via Python object introspection. Attackers can recover the - original __import__ function and bypass all restrictions. - - DO NOT USE for untrusted code execution. Use Docker containers instead. - - This class attempts to restrict access to dangerous modules and built-in functions - but provides no real security boundary against a motivated attacker. - """ - - BLOCKED_MODULES: ClassVar[set[str]] = { - "os", - "sys", - "subprocess", - "shutil", - "importlib", - "inspect", - "tempfile", - "sysconfig", - "builtins", - } - - UNSAFE_BUILTINS: ClassVar[set[str]] = { - "exec", - "eval", - "open", - "compile", - "input", - "globals", - "locals", - "vars", - "help", - "dir", - } - - @staticmethod - def restricted_import( - name: str, - custom_globals: dict[str, Any] | None = None, - custom_locals: dict[str, Any] | None = None, - fromlist: list[str] | None = None, - level: int = 0, - ) -> ModuleType: - """A restricted import function that blocks importing of unsafe modules. - - Args: - name: The name of the module to import. - custom_globals: Global namespace to use. - custom_locals: Local namespace to use. - fromlist: List of items to import from the module. - level: The level value passed to __import__. - - Returns: - The imported module if allowed. - - Raises: - ImportError: If the module is in the blocked modules list. 
- """ - if name in SandboxPython.BLOCKED_MODULES: - raise ImportError(f"Importing '{name}' is not allowed.") - return __import__(name, custom_globals, custom_locals, fromlist or (), level) - - @staticmethod - def safe_builtins() -> dict[str, Any]: - """Creates a dictionary of built-in functions with unsafe ones removed. - - Returns: - A dictionary of safe built-in functions and objects. - """ - import builtins - - safe_builtins = { - k: v - for k, v in builtins.__dict__.items() - if k not in SandboxPython.UNSAFE_BUILTINS - } - safe_builtins["__import__"] = SandboxPython.restricted_import - return safe_builtins - - @staticmethod - def exec(code: str, locals_: dict[str, Any]) -> None: - """Executes Python code in a restricted environment. - - Args: - code: The Python code to execute as a string. - locals_: A dictionary that will be used for local variable storage. - """ - exec(code, {"__builtins__": SandboxPython.safe_builtins()}, locals_) # noqa: S102 - - -class CodeInterpreterTool(BaseTool): - """A tool for executing Python code in isolated environments. - - This tool provides functionality to run Python code either in a Docker container - for safe isolation or directly in a restricted sandbox. It can handle installing - Python packages and executing arbitrary Python code. - """ - - name: str = "Code Interpreter" - description: str = "Interprets Python3 code strings with a final print statement." - args_schema: type[BaseModel] = CodeInterpreterSchema - default_image_tag: str = "code-interpreter:latest" - code: str | None = None - user_dockerfile_path: str | None = None - user_docker_base_url: str | None = None - unsafe_mode: bool = False - - @staticmethod - def _get_installed_package_path() -> str: - """Gets the installation path of the crewai_tools package. - - Returns: - The directory path where the package is installed. - - Raises: - RuntimeError: If the package cannot be found. 
- """ - spec = importlib.util.find_spec("crewai_tools") - if spec is None or spec.origin is None: - raise RuntimeError("Cannot find crewai_tools package installation path") - return os.path.dirname(spec.origin) - - def _verify_docker_image(self) -> None: - """Verifies if the Docker image is available or builds it if necessary. - - Checks if the required Docker image exists. If not, builds it using either a - user-provided Dockerfile or the default one included with the package. - - Raises: - FileNotFoundError: If the Dockerfile cannot be found. - """ - client = ( - docker_from_env() - if self.user_docker_base_url is None - else DockerClient(base_url=self.user_docker_base_url) - ) - - try: - client.images.get(self.default_image_tag) - - except ImageNotFound: - if self.user_dockerfile_path and os.path.exists(self.user_dockerfile_path): - dockerfile_path = self.user_dockerfile_path - else: - package_path = self._get_installed_package_path() - dockerfile_path = os.path.join( - package_path, "tools/code_interpreter_tool" - ) - if not os.path.exists(dockerfile_path): - raise FileNotFoundError( - f"Dockerfile not found in {dockerfile_path}" - ) from None - - client.images.build( - path=dockerfile_path, - tag=self.default_image_tag, - rm=True, - ) - - def _run(self, **kwargs: Unpack[RunKwargs]) -> str: - """Runs the code interpreter tool with the provided arguments. - - Args: - **kwargs: Keyword arguments that should include 'code' and 'libraries_used'. - - Returns: - The output of the executed code as a string. - """ - code: str | None = kwargs.get("code", self.code) - libraries_used: list[str] = kwargs.get("libraries_used", []) - - if not code: - return "No code provided to execute." - - if self.unsafe_mode: - return self.run_code_unsafe(code, libraries_used) - return self.run_code_safety(code, libraries_used) - - @staticmethod - def _install_libraries(container: Any, libraries: list[str]) -> None: - """Installs required Python libraries in the Docker container. 
- - Args: - container: The Docker container where libraries will be installed. - libraries: A list of library names to install using pip. - """ - for library in libraries: - container.exec_run(["pip", "install", library]) - - def _init_docker_container(self) -> Any: - """Initializes and returns a Docker container for code execution. - - Stops and removes any existing container with the same name before creating - a new one. Maps the current working directory to /workspace in the container. - - Returns: - A Docker container object ready for code execution. - """ - container_name = "code-interpreter" - client = docker_from_env() - current_path = os.getcwd() - - # Check if the container is already running - try: - existing_container = client.containers.get(container_name) - existing_container.stop() - existing_container.remove() - except NotFound: - pass # Container does not exist, no need to remove - - return client.containers.run( - self.default_image_tag, - detach=True, - tty=True, - working_dir="/workspace", - name=container_name, - volumes={current_path: {"bind": "/workspace", "mode": "rw"}}, - ) - - @staticmethod - def _check_docker_available() -> bool: - """Checks if Docker is available and running on the system. - - Attempts to run the 'docker info' command to verify Docker availability. - Prints appropriate messages if Docker is not installed or not running. - - Returns: - True if Docker is available and running, False otherwise. 
- """ - - try: - subprocess.run( - ["docker", "info"], # noqa: S607 - check=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - timeout=1, - ) - return True - except (subprocess.CalledProcessError, subprocess.TimeoutExpired): - Printer.print( - "Docker is installed but not running or inaccessible.", - color="bold_purple", - ) - return False - except FileNotFoundError: - Printer.print("Docker is not installed", color="bold_purple") - return False - - def run_code_safety(self, code: str, libraries_used: list[str]) -> str: - """Runs code in the safest available environment. - - Requires Docker to be available for secure code execution. Fails closed - if Docker is not available to prevent sandbox escape vulnerabilities. - - Args: - code: The Python code to execute as a string. - libraries_used: A list of Python library names to install before execution. - - Returns: - The output of the executed code as a string. - - Raises: - RuntimeError: If Docker is not available, as the restricted sandbox - is vulnerable to escape attacks and should not be used - for untrusted code execution. - """ - if self._check_docker_available(): - return self.run_code_in_docker(code, libraries_used) - - error_msg = ( - "Docker is required for safe code execution but is not available. " - "The restricted sandbox fallback has been removed due to security vulnerabilities " - "that allow sandbox escape via Python object introspection. " - "Please install Docker (https://docs.docker.com/get-docker/) or use unsafe_mode=True " - "if you trust the code source and understand the security risks." - ) - Printer.print(error_msg, color="bold_red") - raise RuntimeError(error_msg) - - def run_code_in_docker(self, code: str, libraries_used: list[str]) -> str: - """Runs Python code in a Docker container for safe isolation. - - Creates a Docker container, installs the required libraries, executes the code, - and then cleans up by stopping and removing the container. 
- - Args: - code: The Python code to execute as a string. - libraries_used: A list of Python library names to install before execution. - - Returns: - The output of the executed code as a string, or an error message if execution failed. - """ - Printer.print("Running code in Docker environment", color="bold_blue") - self._verify_docker_image() - container = self._init_docker_container() - self._install_libraries(container, libraries_used) - - exec_result: Any = container.exec_run(["python3", "-c", code]) - - container.stop() - container.remove() - - if exec_result.exit_code != 0: - return f"Something went wrong while running the code: \n{exec_result.output.decode('utf-8')}" - return str(exec_result.output.decode("utf-8")) - - @staticmethod - def run_code_in_restricted_sandbox(code: str) -> str: - """DEPRECATED AND INSECURE: Runs Python code in a restricted sandbox environment. - - WARNING: This method is vulnerable to sandbox escape attacks via Python object - introspection and should NOT be used for untrusted code execution. It has been - deprecated and is only kept for backward compatibility with trusted code. - - The "restricted" environment can be bypassed by attackers who can: - - Use object graph introspection to recover the original __import__ function - - Access any Python module including os, subprocess, sys, etc. - - Execute arbitrary commands on the host system - - Use run_code_in_docker() for secure code execution, or run_code_unsafe() - if you explicitly acknowledge the security risks. - - Args: - code: The Python code to execute as a string. - - Returns: - The value of the 'result' variable from the executed code, - or an error message if execution failed. 
- """ - Printer.print( - "WARNING: Running code in INSECURE restricted sandbox (vulnerable to escape attacks)", - color="bold_red", - ) - exec_locals: dict[str, Any] = {} - try: - SandboxPython.exec(code=code, locals_=exec_locals) - return exec_locals.get("result", "No result variable found.") # type: ignore[no-any-return] - except Exception as e: - return f"An error occurred: {e!s}" - - @staticmethod - def run_code_unsafe(code: str, libraries_used: list[str]) -> str: - """Runs code directly on the host machine without any safety restrictions. - - WARNING: This mode is unsafe and should only be used in trusted environments - with code from trusted sources. - - Args: - code: The Python code to execute as a string. - libraries_used: A list of Python library names to install before execution. - - Returns: - The value of the 'result' variable from the executed code, - or an error message if execution failed. - """ - Printer.print("WARNING: Running code in unsafe mode", color="bold_magenta") - # Install libraries on the host machine - for library in libraries_used: - subprocess.run( # noqa: S603 - [sys.executable, "-m", "pip", "install", library], check=False - ) - - # Execute the code - try: - exec_locals: dict[str, Any] = {} - exec(code, {}, exec_locals) # noqa: S102 - return exec_locals.get("result", "No result variable found.") # type: ignore[no-any-return] - except Exception as e: - return f"An error occurred: {e!s}" diff --git a/lib/crewai-tools/tests/tools/test_code_interpreter_tool.py b/lib/crewai-tools/tests/tools/test_code_interpreter_tool.py deleted file mode 100644 index 5b0144790..000000000 --- a/lib/crewai-tools/tests/tools/test_code_interpreter_tool.py +++ /dev/null @@ -1,253 +0,0 @@ -import sys -from unittest.mock import patch - -from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import ( - CodeInterpreterTool, - SandboxPython, -) -import pytest - - -@pytest.fixture -def printer_mock(): - with patch("crewai_tools.printer.Printer.print") 
as mock: - yield mock - - -@pytest.fixture -def docker_unavailable_mock(): - with patch( - "crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.CodeInterpreterTool._check_docker_available", - return_value=False, - ) as mock: - yield mock - - -@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") -def test_run_code_in_docker(docker_mock, printer_mock): - tool = CodeInterpreterTool() - code = "print('Hello, World!')" - libraries_used = ["numpy", "pandas"] - expected_output = "Hello, World!\n" - - docker_mock().containers.run().exec_run().exit_code = 0 - docker_mock().containers.run().exec_run().output = expected_output.encode() - - result = tool.run_code_in_docker(code, libraries_used) - assert result == expected_output - printer_mock.assert_called_with( - "Running code in Docker environment", color="bold_blue" - ) - - -@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") -def test_run_code_in_docker_with_error(docker_mock, printer_mock): - tool = CodeInterpreterTool() - code = "print(1/0)" - libraries_used = ["numpy", "pandas"] - expected_output = "Something went wrong while running the code: \nZeroDivisionError: division by zero\n" - - docker_mock().containers.run().exec_run().exit_code = 1 - docker_mock().containers.run().exec_run().output = ( - b"ZeroDivisionError: division by zero\n" - ) - - result = tool.run_code_in_docker(code, libraries_used) - assert result == expected_output - printer_mock.assert_called_with( - "Running code in Docker environment", color="bold_blue" - ) - - -@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") -def test_run_code_in_docker_with_script(docker_mock, printer_mock): - tool = CodeInterpreterTool() - code = """print("This is line 1") -print("This is line 2")""" - libraries_used = [] - expected_output = "This is line 1\nThis is line 2\n" - - docker_mock().containers.run().exec_run().exit_code = 0 - 
docker_mock().containers.run().exec_run().output = expected_output.encode() - - result = tool.run_code_in_docker(code, libraries_used) - assert result == expected_output - printer_mock.assert_called_with( - "Running code in Docker environment", color="bold_blue" - ) - - -def test_docker_unavailable_raises_error(printer_mock, docker_unavailable_mock): - """Test that execution fails when Docker is unavailable in safe mode.""" - tool = CodeInterpreterTool() - code = """ -result = 2 + 2 -print(result) -""" - with pytest.raises(RuntimeError) as exc_info: - tool.run(code=code, libraries_used=[]) - - assert "Docker is required for safe code execution" in str(exc_info.value) - assert "sandbox escape" in str(exc_info.value) - - -def test_restricted_sandbox_running_with_blocked_modules(): - """Test that restricted modules cannot be imported when using the deprecated sandbox directly.""" - tool = CodeInterpreterTool() - restricted_modules = SandboxPython.BLOCKED_MODULES - - for module in restricted_modules: - code = f""" -import {module} -result = "Import succeeded" -""" - # Note: run_code_in_restricted_sandbox is deprecated and insecure - # This test verifies the old behavior but should not be used in production - result = tool.run_code_in_restricted_sandbox(code) - - assert f"An error occurred: Importing '{module}' is not allowed" in result - - -def test_restricted_sandbox_running_with_blocked_builtins(): - """Test that restricted builtins are not available when using the deprecated sandbox directly.""" - tool = CodeInterpreterTool() - restricted_builtins = SandboxPython.UNSAFE_BUILTINS - - for builtin in restricted_builtins: - code = f""" -{builtin}("test") -result = "Builtin available" -""" - # Note: run_code_in_restricted_sandbox is deprecated and insecure - # This test verifies the old behavior but should not be used in production - result = tool.run_code_in_restricted_sandbox(code) - assert f"An error occurred: name '{builtin}' is not defined" in result - - -def 
test_restricted_sandbox_running_with_no_result_variable( - printer_mock, docker_unavailable_mock -): - """Test behavior when no result variable is set in deprecated sandbox.""" - tool = CodeInterpreterTool() - code = """ -x = 10 -""" - # Note: run_code_in_restricted_sandbox is deprecated and insecure - # This test verifies the old behavior but should not be used in production - result = tool.run_code_in_restricted_sandbox(code) - assert result == "No result variable found." - - -def test_unsafe_mode_running_with_no_result_variable( - printer_mock, docker_unavailable_mock -): - """Test behavior when no result variable is set.""" - tool = CodeInterpreterTool(unsafe_mode=True) - code = """ -x = 10 -""" - result = tool.run(code=code, libraries_used=[]) - printer_mock.assert_called_with( - "WARNING: Running code in unsafe mode", color="bold_magenta" - ) - assert result == "No result variable found." - - -@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.subprocess.run") -def test_unsafe_mode_installs_libraries_without_shell( - subprocess_run_mock, printer_mock, docker_unavailable_mock -): - """Test that library installation uses subprocess.run with shell=False, not os.system.""" - tool = CodeInterpreterTool(unsafe_mode=True) - code = "result = 1" - libraries_used = ["numpy", "pandas"] - - tool.run(code=code, libraries_used=libraries_used) - - assert subprocess_run_mock.call_count == 2 - for call, library in zip(subprocess_run_mock.call_args_list, libraries_used): - args, kwargs = call - # Must be list form (no shell expansion possible) - assert args[0] == [sys.executable, "-m", "pip", "install", library] - # shell= must not be True (defaults to False) - assert kwargs.get("shell", False) is False - - -@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.subprocess.run") -def test_unsafe_mode_library_name_with_shell_metacharacters_does_not_invoke_shell( - subprocess_run_mock, printer_mock, docker_unavailable_mock -): - """Test that a 
malicious library name cannot inject shell commands.""" - tool = CodeInterpreterTool(unsafe_mode=True) - code = "result = 1" - malicious_library = "numpy; rm -rf /" - - tool.run(code=code, libraries_used=[malicious_library]) - - subprocess_run_mock.assert_called_once() - args, kwargs = subprocess_run_mock.call_args - # The entire malicious string is passed as a single argument — no shell parsing - assert args[0] == [sys.executable, "-m", "pip", "install", malicious_library] - assert kwargs.get("shell", False) is False - - -def test_unsafe_mode_running_unsafe_code(printer_mock, docker_unavailable_mock): - """Test behavior when no result variable is set.""" - tool = CodeInterpreterTool(unsafe_mode=True) - code = """ -import os -os.system("ls -la") -result = eval("5/1") -""" - result = tool.run(code=code, libraries_used=[]) - printer_mock.assert_called_with( - "WARNING: Running code in unsafe mode", color="bold_magenta" - ) - assert 5.0 == result - - -@pytest.mark.xfail( - reason=( - "run_code_in_restricted_sandbox is known to be vulnerable to sandbox " - "escape via object introspection. This test encodes the desired secure " - "behavior (no escape possible) and will start passing once the " - "vulnerability is fixed or the function is removed." - ) -) -def test_sandbox_escape_vulnerability_demonstration(printer_mock): - """Demonstrate that the restricted sandbox is vulnerable to escape attacks. - - This test shows that an attacker can use Python object introspection to bypass - the restricted sandbox and access blocked modules like 'os'. This is why the - sandbox should never be used for untrusted code execution. - - NOTE: This test uses the deprecated run_code_in_restricted_sandbox directly - to demonstrate the vulnerability. In production, Docker is now required. 
- """ - tool = CodeInterpreterTool() - - # Classic Python sandbox escape via object introspection - escape_code = """ -# Recover the real __import__ function via object introspection -for cls in ().__class__.__bases__[0].__subclasses__(): - if cls.__name__ == 'catch_warnings': - # Get the real builtins module - real_builtins = cls()._module.__builtins__ - real_import = real_builtins['__import__'] - # Now we can import os and execute commands - os = real_import('os') - # Demonstrate we have escaped the sandbox - result = "SANDBOX_ESCAPED" if hasattr(os, 'system') else "FAILED" - break -""" - - # The deprecated sandbox is vulnerable to this attack - result = tool.run_code_in_restricted_sandbox(escape_code) - - # Desired behavior: the restricted sandbox should prevent this escape. - # If this assertion fails, run_code_in_restricted_sandbox remains vulnerable. - assert result != "SANDBOX_ESCAPED", ( - "The restricted sandbox was bypassed via object introspection. " - "This indicates run_code_in_restricted_sandbox is still vulnerable and " - "is why Docker is now required for safe code execution." 
- ) diff --git a/lib/crewai-tools/tool.specs.json b/lib/crewai-tools/tool.specs.json index 893be45a4..adc392bab 100644 --- a/lib/crewai-tools/tool.specs.json +++ b/lib/crewai-tools/tool.specs.json @@ -81,8 +81,16 @@ ], "default": null, "title": "Mind Name" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "AIMindTool", "type": "object" }, @@ -160,12 +168,20 @@ "title": "Save Dir", "type": "string" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "use_title_as_filename": { "default": false, "title": "Use Title As Filename", "type": "boolean" } }, + "required": [ + "tool_type" + ], "title": "ArxivPaperTool", "type": "object" }, @@ -281,8 +297,16 @@ "default": "https://api.search.brave.com/res/v1/images/search", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "BraveImageSearchTool", "type": "object" }, @@ -464,8 +488,16 @@ "default": "https://api.search.brave.com/res/v1/llm/context", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "BraveLLMContextTool", "type": "object" }, @@ -743,8 +775,16 @@ "default": "https://api.search.brave.com/res/v1/local/descriptions", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "BraveLocalPOIsDescriptionTool", "type": "object" }, @@ -856,8 +896,16 @@ "default": "https://api.search.brave.com/res/v1/local/pois", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "BraveLocalPOIsTool", "type": "object" }, @@ -1014,8 +1062,16 @@ 
"default": "https://api.search.brave.com/res/v1/news/search", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "BraveNewsSearchTool", "type": "object" }, @@ -1288,8 +1344,16 @@ "default": "https://api.search.brave.com/res/v1/web/search", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "BraveSearchTool", "type": "object" }, @@ -1665,8 +1729,16 @@ "default": "https://api.search.brave.com/res/v1/videos/search", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "BraveVideoSearchTool", "type": "object" }, @@ -1927,8 +1999,16 @@ "default": "https://api.search.brave.com/res/v1/web/search", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "BraveWebSearchTool", "type": "object" }, @@ -2300,6 +2380,11 @@ "title": "Format", "type": "string" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "url": { "anyOf": [ { @@ -2325,6 +2410,9 @@ "title": "Zipcode" } }, + "required": [ + "tool_type" + ], "title": "BrightDataDatasetTool", "type": "object" }, @@ -2502,12 +2590,20 @@ "default": null, "title": "Search Type" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "zone": { "default": "", "title": "Zone", "type": "string" } }, + "required": [ + "tool_type" + ], "title": "BrightDataSearchTool", "type": "object" }, @@ -2678,6 +2774,11 @@ "title": "Format", "type": "string" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "url": { "anyOf": [ { @@ -2696,6 +2797,9 @@ "type": "string" } 
}, + "required": [ + "tool_type" + ], "title": "BrightDataWebUnlockerTool", "type": "object" }, @@ -2868,8 +2972,16 @@ ], "default": false, "title": "Text Content" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "BrowserbaseLoadTool", "type": "object" }, @@ -3914,8 +4026,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "CSVSearchTool", "type": "object" }, @@ -4965,8 +5085,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "CodeDocsSearchTool", "type": "object" }, @@ -4994,127 +5122,6 @@ "type": "object" } }, - { - "description": "Interprets Python3 code strings with a final print statement.", - "env_vars": [], - "humanized_name": "Code Interpreter", - "init_params_schema": { - "$defs": { - "EnvVar": { - "properties": { - "default": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": "Default" - }, - "description": { - "title": "Description", - "type": "string" - }, - "name": { - "title": "Name", - "type": "string" - }, - "required": { - "default": true, - "title": "Required", - "type": "boolean" - } - }, - "required": [ - "name", - "description" - ], - "title": "EnvVar", - "type": "object" - } - }, - "description": "A tool for executing Python code in isolated environments.\n\nThis tool provides functionality to run Python code either in a Docker container\nfor safe isolation or directly in a restricted sandbox. 
It can handle installing\nPython packages and executing arbitrary Python code.", - "properties": { - "code": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": "Code" - }, - "default_image_tag": { - "default": "code-interpreter:latest", - "title": "Default Image Tag", - "type": "string" - }, - "unsafe_mode": { - "default": false, - "title": "Unsafe Mode", - "type": "boolean" - }, - "user_docker_base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": "User Docker Base Url" - }, - "user_dockerfile_path": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": "User Dockerfile Path" - } - }, - "title": "CodeInterpreterTool", - "type": "object" - }, - "name": "CodeInterpreterTool", - "package_dependencies": [], - "run_params_schema": { - "description": "Schema for defining inputs to the CodeInterpreterTool.\n\nThis schema defines the required parameters for code execution,\nincluding the code to run and any libraries that need to be installed.", - "properties": { - "code": { - "description": "Python3 code used to be interpreted in the Docker container. ALWAYS PRINT the final result and the output of the code", - "title": "Code", - "type": "string" - }, - "libraries_used": { - "description": "List of libraries used in the code with proper installing names separated by commas. 
Example: numpy,pandas,beautifulsoup4", - "items": { - "type": "string" - }, - "title": "Libraries Used", - "type": "array" - } - }, - "required": [ - "code", - "libraries_used" - ], - "title": "CodeInterpreterSchema", - "type": "object" - } - }, { "description": "", "env_vars": [ @@ -5165,10 +5172,17 @@ } }, "description": "Wrapper for composio tools.", - "properties": {}, + "properties": { + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + } + }, "required": [ "name", - "description" + "description", + "tool_type" ], "title": "ComposioTool", "type": "object" @@ -5232,10 +5246,16 @@ "contextual_client": { "default": null, "title": "Contextual Client" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ - "api_key" + "api_key", + "tool_type" ], "title": "ContextualAICreateAgentTool", "type": "object" @@ -5328,10 +5348,16 @@ "api_key": { "title": "Api Key", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ - "api_key" + "api_key", + "tool_type" ], "title": "ContextualAIParseTool", "type": "object" @@ -5449,10 +5475,16 @@ "contextual_client": { "default": null, "title": "Contextual Client" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ - "api_key" + "api_key", + "tool_type" ], "title": "ContextualAIQueryTool", "type": "object" @@ -5543,10 +5575,16 @@ "api_key": { "title": "Api Key", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ - "api_key" + "api_key", + "tool_type" ], "title": "ContextualAIRerankTool", "type": "object" @@ -5713,6 +5751,11 @@ "description": "Specify whether the index is scoped. 
Is True by default.", "title": "Scoped Index", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ @@ -5720,7 +5763,8 @@ "collection_name", "scope_name", "bucket_name", - "index_name" + "index_name", + "tool_type" ], "title": "CouchbaseFTSVectorSearchTool", "type": "object" @@ -6765,8 +6809,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "DOCXSearchTool", "type": "object" }, @@ -6902,8 +6954,16 @@ ], "default": "1024x1024", "title": "Size" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "DallETool", "type": "object" }, @@ -7004,8 +7064,16 @@ ], "default": null, "title": "Default Warehouse Id" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "DatabricksQueryTool", "type": "object" }, @@ -7135,8 +7203,16 @@ ], "default": null, "title": "Directory" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "DirectoryReadTool", "type": "object" }, @@ -8180,8 +8256,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "DirectorySearchTool", "type": "object" }, @@ -8325,6 +8409,11 @@ "default": false, "title": "Summary" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "type": { "anyOf": [ { @@ -8338,6 +8427,9 @@ "title": "Type" } }, + "required": [ + "tool_type" + ], "title": "EXASearchTool", "type": "object" }, @@ -8444,7 +8536,16 @@ "type": "object" } }, - "properties": {}, + "properties": { + "tool_type": { + "readOnly": true, + 
"title": "Tool Type", + "type": "string" + } + }, + "required": [ + "tool_type" + ], "title": "FileCompressorTool", "type": "object" }, @@ -8546,8 +8647,16 @@ ], "default": null, "title": "File Path" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "FileReadTool", "type": "object" }, @@ -8637,7 +8746,16 @@ "type": "object" } }, - "properties": {}, + "properties": { + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + } + }, + "required": [ + "tool_type" + ], "title": "FileWriterTool", "type": "object" }, @@ -8760,8 +8878,16 @@ } ], "title": "Config" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "FirecrawlCrawlWebsiteTool", "type": "object" }, @@ -8851,8 +8977,16 @@ "additionalProperties": true, "title": "Config", "type": "object" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "FirecrawlScrapeWebsiteTool", "type": "object" }, @@ -8949,8 +9083,16 @@ } ], "title": "Config" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "FirecrawlSearchTool", "type": "object" }, @@ -9045,8 +9187,16 @@ ], "description": "The user's Personal Access Token to access CrewAI AMP API. 
If not provided, it will be loaded from the environment variable CREWAI_PERSONAL_ACCESS_TOKEN.", "title": "Personal Access Token" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "GenerateCrewaiAutomationTool", "type": "object" }, @@ -10114,10 +10264,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ - "gh_token" + "gh_token", + "tool_type" ], "title": "GithubSearchTool", "type": "object" @@ -10227,8 +10383,16 @@ ], "default": null, "title": "Hyperbrowser" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "HyperbrowserLoadTool", "type": "object" }, @@ -10331,11 +10495,17 @@ "default": 600, "title": "Max Polling Time", "type": "integer" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ "crew_api_url", - "crew_bearer_token" + "crew_bearer_token", + "tool_type" ], "title": "InvokeCrewAIAutomationTool", "type": "object" @@ -11380,8 +11550,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "JSONSearchTool", "type": "object" }, @@ -11471,6 +11649,11 @@ "title": "Headers", "type": "object" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "website_url": { "anyOf": [ { @@ -11484,6 +11667,9 @@ "title": "Website Url" } }, + "required": [ + "tool_type" + ], "title": "JinaScrapeWebsiteTool", "type": "object" }, @@ -11554,7 +11740,16 @@ "type": "object" } }, - "properties": {}, + "properties": { + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + } + }, + "required": [ + "tool_type" + ], "title": "LinkupSearchTool", "type": "object" 
}, @@ -11614,12 +11809,18 @@ "properties": { "llama_index_tool": { "title": "Llama Index Tool" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ "name", "description", - "llama_index_tool" + "llama_index_tool", + "tool_type" ], "title": "LlamaIndexTool", "type": "object" @@ -12654,8 +12855,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "MDXSearchTool", "type": "object" }, @@ -12767,6 +12976,11 @@ "description": "UUID of the Agent Handler Tool Pack to use", "title": "Tool Pack Id", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ @@ -12774,7 +12988,8 @@ "description", "tool_pack_id", "registered_user_id", - "tool_name" + "tool_name", + "tool_type" ], "title": "MergeAgentHandlerTool", "type": "object" @@ -12958,6 +13173,11 @@ "title": "Text Key", "type": "string" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "vector_index_name": { "default": "vector_index", "description": "Name of the Atlas Search vector index", @@ -12968,7 +13188,8 @@ "required": [ "database_name", "collection_name", - "connection_string" + "connection_string", + "tool_type" ], "title": "MongoDBVectorSearchTool", "type": "object" @@ -13075,8 +13296,16 @@ ], "default": null, "title": "Session Id" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "MultiOnTool", "type": "object" }, @@ -14117,10 +14346,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ - "db_uri" + "db_uri", + "tool_type" ], "title": "MySQLSearchTool", "type": "object" @@ -14216,10 +14451,16 @@ }, "title": "Tables", "type": 
"array" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ - "db_uri" + "db_uri", + "tool_type" ], "title": "NL2SQLTool", "type": "object" @@ -14408,6 +14649,12 @@ "title": "Is Litellm", "type": "boolean" }, + "llm_type": { + "const": "litellm", + "default": "litellm", + "title": "Llm Type", + "type": "string" + }, "logit_bias": { "anyOf": [ { @@ -14622,8 +14869,16 @@ "properties": { "llm": { "$ref": "#/$defs/LLM" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "OCRTool", "type": "object" }, @@ -14820,11 +15075,17 @@ }, "oxylabs_api": { "title": "Oxylabs Api" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ "oxylabs_api", - "config" + "config", + "tool_type" ], "title": "OxylabsAmazonProductScraperTool", "type": "object" @@ -15049,11 +15310,17 @@ }, "oxylabs_api": { "title": "Oxylabs Api" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ "oxylabs_api", - "config" + "config", + "tool_type" ], "title": "OxylabsAmazonSearchScraperTool", "type": "object" @@ -15291,11 +15558,17 @@ }, "oxylabs_api": { "title": "Oxylabs Api" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ "oxylabs_api", - "config" + "config", + "tool_type" ], "title": "OxylabsGoogleSearchScraperTool", "type": "object" @@ -15481,11 +15754,17 @@ }, "oxylabs_api": { "title": "Oxylabs Api" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ "oxylabs_api", - "config" + "config", + "tool_type" ], "title": "OxylabsUniversalScraperTool", "type": "object" @@ -16543,8 +16822,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], 
"title": "PDFSearchTool", "type": "object" }, @@ -16626,8 +16913,16 @@ "default": "https://api.parallel.ai/v1beta/search", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "ParallelSearchTool", "type": "object" }, @@ -16786,8 +17081,16 @@ }, "title": "Evaluators", "type": "array" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "PatronusEvalTool", "type": "object" }, @@ -16853,11 +17156,17 @@ "evaluator": { "title": "Evaluator", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ "evaluator", - "evaluated_model_gold_answer" + "evaluated_model_gold_answer", + "tool_type" ], "title": "PatronusLocalEvaluatorTool", "type": "object" @@ -16963,8 +17272,16 @@ }, "title": "Evaluators", "type": "array" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "PatronusPredefinedCriteriaEvalTool", "type": "object" }, @@ -17153,10 +17470,16 @@ "description": "Base package path for Qdrant. 
Will dynamically import client and models.", "title": "Qdrant Package", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ - "qdrant_config" + "qdrant_config", + "tool_type" ], "title": "QdrantVectorSearchTool", "type": "object" @@ -18226,8 +18549,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "RagTool", "type": "object" }, @@ -18323,6 +18654,11 @@ ], "title": "Headers" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "website_url": { "anyOf": [ { @@ -18336,6 +18672,9 @@ "title": "Website Url" } }, + "required": [ + "tool_type" + ], "title": "ScrapeElementFromWebsiteTool", "type": "object" }, @@ -18435,6 +18774,11 @@ ], "title": "Headers" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "website_url": { "anyOf": [ { @@ -18448,6 +18792,9 @@ "title": "Website Url" } }, + "required": [ + "tool_type" + ], "title": "ScrapeWebsiteTool", "type": "object" }, @@ -18537,6 +18884,11 @@ "title": "Enable Logging", "type": "boolean" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "user_prompt": { "anyOf": [ { @@ -18562,6 +18914,9 @@ "title": "Website Url" } }, + "required": [ + "tool_type" + ], "title": "ScrapegraphScrapeTool", "type": "object" }, @@ -18662,8 +19017,16 @@ ], "default": null, "title": "Scrapfly" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "ScrapflyScrapeWebsiteTool", "type": "object" }, @@ -18821,6 +19184,11 @@ "default": false, "title": "Return Html" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "wait_time": { "anyOf": [ { @@ -18846,6 +19214,9 @@ "title": "Website Url" } }, + "required": [ + 
"tool_type" + ], "title": "SeleniumScrapingTool", "type": "object" }, @@ -18935,8 +19306,16 @@ ], "default": null, "title": "Client" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "SerpApiGoogleSearchTool", "type": "object" }, @@ -19032,8 +19411,16 @@ ], "default": null, "title": "Client" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "SerpApiGoogleShoppingTool", "type": "object" }, @@ -19175,8 +19562,16 @@ "default": "search", "title": "Search Type", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "SerperDevTool", "type": "object" }, @@ -19247,7 +19642,16 @@ "type": "object" } }, - "properties": {}, + "properties": { + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + } + }, + "required": [ + "tool_type" + ], "title": "SerperScrapeWebsiteTool", "type": "object" }, @@ -20335,8 +20739,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "SerplyJobSearchTool", "type": "object" }, @@ -20450,8 +20862,16 @@ "default": "https://api.serply.io/v1/news/", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "SerplyNewsSearchTool", "type": "object" }, @@ -20565,8 +20985,16 @@ "default": "https://api.serply.io/v1/scholar/", "title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "SerplyScholarSearchTool", "type": "object" }, @@ -20716,8 +21144,16 @@ "default": "https://api.serply.io/v1/search/", 
"title": "Search Url", "type": "string" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "SerplyWebSearchTool", "type": "object" }, @@ -21798,8 +22234,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "SerplyWebpageToMarkdownTool", "type": "object" }, @@ -21940,8 +22384,16 @@ ], "default": null, "title": "Connection Pool" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "SingleStoreSearchTool", "type": "object" }, @@ -22152,10 +22604,16 @@ "description": "Delay between retries in seconds", "title": "Retry Delay", "type": "number" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, "required": [ - "config" + "config", + "tool_type" ], "title": "SnowflakeSearchTool", "type": "object" @@ -22342,6 +22800,11 @@ "default": null, "title": "Spider" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "website_url": { "anyOf": [ { @@ -22355,6 +22818,9 @@ "title": "Website Url" } }, + "required": [ + "tool_type" + ], "title": "SpiderTool", "type": "object" }, @@ -22517,6 +22983,11 @@ "default": "https://api.stagehand.browserbase.com/v1", "title": "Server Url" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "use_simplified_dom": { "default": true, "title": "Use Simplified Dom", @@ -22533,6 +23004,9 @@ "type": "boolean" } }, + "required": [ + "tool_type" + ], "title": "StagehandTool", "type": "object" }, @@ -23610,6 +24084,11 @@ "title": "Summarize", "type": "boolean" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "txt": { "anyOf": [ { @@ -23623,6 +24102,9 @@ "title": "Txt" } }, + "required": [ + "tool_type" + ], 
"title": "TXTSearchTool", "type": "object" }, @@ -23769,8 +24251,16 @@ "description": "The timeout for the extraction request in seconds.", "title": "Timeout", "type": "integer" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "TavilyExtractorTool", "type": "object" }, @@ -24017,6 +24507,11 @@ "title": "Timeout", "type": "integer" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "topic": { "default": "general", "description": "The topic to focus the search on.", @@ -24029,6 +24524,9 @@ "type": "string" } }, + "required": [ + "tool_type" + ], "title": "TavilySearchTool", "type": "object" }, @@ -24102,7 +24600,16 @@ } }, "description": "Tool for analyzing images using vision models.\n\nArgs:\n llm: Optional LLM instance to use\n model: Model identifier to use if no LLM is provided", - "properties": {}, + "properties": { + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + } + }, + "required": [ + "tool_type" + ], "title": "VisionTool", "type": "object" }, @@ -24224,6 +24731,11 @@ "default": null, "title": "Query" }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" + }, "vectorizer": { "title": "Vectorizer" }, @@ -24241,7 +24753,8 @@ "required": [ "collection_name", "weaviate_cluster_url", - "weaviate_api_key" + "weaviate_api_key", + "tool_type" ], "title": "WeaviateVectorSearchTool", "type": "object" @@ -25288,8 +25801,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "WebsiteSearchTool", "type": "object" }, @@ -26339,8 +26860,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "XMLSearchTool", 
"type": "object" }, @@ -27390,8 +27919,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "YoutubeChannelSearchTool", "type": "object" }, @@ -28441,8 +28978,16 @@ "default": false, "title": "Summarize", "type": "boolean" + }, + "tool_type": { + "readOnly": true, + "title": "Tool Type", + "type": "string" } }, + "required": [ + "tool_type" + ], "title": "YoutubeVideoSearchTool", "type": "object" }, diff --git a/lib/crewai/src/crewai/agent/core.py b/lib/crewai/src/crewai/agent/core.py index 66554c59d..c86d7112c 100644 --- a/lib/crewai/src/crewai/agent/core.py +++ b/lib/crewai/src/crewai/agent/core.py @@ -9,8 +9,6 @@ import contextvars from datetime import datetime import json from pathlib import Path -import shutil -import subprocess import time from typing import ( TYPE_CHECKING, @@ -116,7 +114,6 @@ except ImportError: if TYPE_CHECKING: from crewai_files import FileInput - from crewai_tools import CodeInterpreterTool from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig from crewai.agents.agent_builder.base_agent import PlatformAppOrAction @@ -211,7 +208,9 @@ class Agent(BaseAgent): default=None, description="Response format for the agent." ) allow_code_execution: bool | None = Field( - default=False, description="Enable code execution for the agent." + default=False, + deprecated=True, + description="Deprecated. CodeInterpreterTool is no longer available. Use dedicated sandbox services instead.", ) respect_context_window: bool = Field( default=True, @@ -236,7 +235,8 @@ class Agent(BaseAgent): ) code_execution_mode: Literal["safe", "unsafe"] = Field( default="safe", - description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).", + deprecated=True, + description="Deprecated. CodeInterpreterTool is no longer available. 
Use dedicated sandbox services instead.", ) planning_config: PlanningConfig | None = Field( default=None, @@ -329,7 +329,13 @@ class Agent(BaseAgent): self._setup_agent_executor() if self.allow_code_execution: - self._validate_docker_installation() + warnings.warn( + "allow_code_execution is deprecated and will be removed in v2.0. " + "CodeInterpreterTool is no longer available. " + "Use dedicated sandbox services like E2B or Modal.", + DeprecationWarning, + stacklevel=2, + ) self.set_skills() @@ -1123,20 +1129,15 @@ class Agent(BaseAgent): return [AddImageTool()] - def get_code_execution_tools(self) -> list[CodeInterpreterTool]: - """Return code interpreter tools based on the agent's execution mode.""" - try: - from crewai_tools import ( - CodeInterpreterTool, - ) - - unsafe_mode = self.code_execution_mode == "unsafe" - return [CodeInterpreterTool(unsafe_mode=unsafe_mode)] - except ModuleNotFoundError: - self._logger.log( - "info", "Coding tools not available. Install crewai_tools. " - ) - return [] + def get_code_execution_tools(self) -> list[Any]: + """Deprecated: CodeInterpreterTool is no longer available.""" + warnings.warn( + "CodeInterpreterTool is no longer available. " + "Use dedicated sandbox services like E2B or Modal.", + DeprecationWarning, + stacklevel=2, + ) + return [] @staticmethod def get_output_converter( @@ -1216,28 +1217,14 @@ class Agent(BaseAgent): self._logger.log("warning", f"Failed to inject date: {e!s}") def _validate_docker_installation(self) -> None: - """Check if Docker is installed and running.""" - docker_path = shutil.which("docker") - if not docker_path: - raise RuntimeError( - f"Docker is not installed. Please install Docker to use code execution with agent: {self.role}" - ) - - try: - subprocess.run( # noqa: S603 - [str(docker_path), "info"], - check=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - except subprocess.CalledProcessError as e: - raise RuntimeError( - f"Docker is not running. 
Please start Docker to use code execution with agent: {self.role}" - ) from e - except subprocess.TimeoutExpired as e: - raise RuntimeError( - f"Docker command timed out. Please check your Docker installation for agent: {self.role}" - ) from e + """Deprecated: No-op. CodeInterpreterTool is no longer available.""" + warnings.warn( + "CodeInterpreterTool is no longer available. " + "Use dedicated sandbox services like E2B or Modal.", + DeprecationWarning, + stacklevel=2, + ) + return def __repr__(self) -> str: return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})" diff --git a/lib/crewai/tests/test_crew.py b/lib/crewai/tests/test_crew.py index 9621a1f0d..9db9ef4e2 100644 --- a/lib/crewai/tests/test_crew.py +++ b/lib/crewai/tests/test_crew.py @@ -48,7 +48,6 @@ from crewai.tools.agent_tools.add_image_tool import AddImageTool from crewai.types.usage_metrics import UsageMetrics from crewai.utilities.rpm_controller import RPMController from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler -from crewai_tools import CodeInterpreterTool from pydantic import BaseModel, Field import pydantic_core import pytest @@ -1648,11 +1647,8 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff(): _, kwargs = mock_execute_sync.call_args used_tools = kwargs["tools"] - # Verify that exactly one tool was used and it was a CodeInterpreterTool - assert len(used_tools) == 1, "Should have exactly one tool" - assert isinstance(used_tools[0], CodeInterpreterTool), ( - "Tool should be CodeInterpreterTool" - ) + # CodeInterpreterTool was removed; get_code_execution_tools() now returns [] + assert len(used_tools) == 0, "Should have no tools (code execution tools are deprecated)" @pytest.mark.vcr() @@ -3918,16 +3914,13 @@ def test_task_tools_preserve_code_execution_tools(): assert any(isinstance(tool, TestTool) for tool in used_tools), ( "Task's TestTool should be present" ) - assert any(isinstance(tool, CodeInterpreterTool) for tool in 
used_tools), ( - "CodeInterpreterTool should be present" - ) assert any("delegate" in tool.name.lower() for tool in used_tools), ( "Delegation tool should be present" ) - # Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools) - assert len(used_tools) == 4, ( - "Should have TestTool, CodeInterpreter, and 2 delegation tools" + # Verify the total number of tools (TestTool + 2 delegation tools; CodeInterpreterTool removed) + assert len(used_tools) == 3, ( + "Should have TestTool and 2 delegation tools" ) From 5b4a0e8734300379ec4a2440c89d350a9461c7d8 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Tue, 7 Apr 2026 23:22:58 +0800 Subject: [PATCH 09/21] feat: bump versions to 1.14.0a4 --- lib/crewai-files/src/crewai_files/__init__.py | 2 +- lib/crewai-tools/pyproject.toml | 2 +- lib/crewai-tools/src/crewai_tools/__init__.py | 2 +- lib/crewai/pyproject.toml | 2 +- lib/crewai/src/crewai/__init__.py | 2 +- .../crewai/cli/templates/crew/pyproject.toml | 2 +- .../crewai/cli/templates/flow/pyproject.toml | 2 +- .../crewai/cli/templates/tool/pyproject.toml | 2 +- lib/devtools/src/crewai_devtools/__init__.py | 2 +- uv.lock | 18 +----------------- 10 files changed, 10 insertions(+), 26 deletions(-) diff --git a/lib/crewai-files/src/crewai_files/__init__.py b/lib/crewai-files/src/crewai_files/__init__.py index 35bc21fa8..9df9a3b65 100644 --- a/lib/crewai-files/src/crewai_files/__init__.py +++ b/lib/crewai-files/src/crewai_files/__init__.py @@ -152,4 +152,4 @@ __all__ = [ "wrap_file_source", ] -__version__ = "1.14.0a3" +__version__ = "1.14.0a4" diff --git a/lib/crewai-tools/pyproject.toml b/lib/crewai-tools/pyproject.toml index 9fa051003..6be197911 100644 --- a/lib/crewai-tools/pyproject.toml +++ b/lib/crewai-tools/pyproject.toml @@ -10,7 +10,7 @@ requires-python = ">=3.10, <3.14" dependencies = [ "pytube~=15.0.0", "requests~=2.32.5", - "crewai==1.14.0a3", + "crewai==1.14.0a4", "tiktoken~=0.8.0", "beautifulsoup4~=4.13.4", "python-docx~=1.2.0", diff 
--git a/lib/crewai-tools/src/crewai_tools/__init__.py b/lib/crewai-tools/src/crewai_tools/__init__.py index bdaa0499b..5db3d05f1 100644 --- a/lib/crewai-tools/src/crewai_tools/__init__.py +++ b/lib/crewai-tools/src/crewai_tools/__init__.py @@ -305,4 +305,4 @@ __all__ = [ "ZapierActionTools", ] -__version__ = "1.14.0a3" +__version__ = "1.14.0a4" diff --git a/lib/crewai/pyproject.toml b/lib/crewai/pyproject.toml index a09fb4461..f845cd0a2 100644 --- a/lib/crewai/pyproject.toml +++ b/lib/crewai/pyproject.toml @@ -55,7 +55,7 @@ Repository = "https://github.com/crewAIInc/crewAI" [project.optional-dependencies] tools = [ - "crewai-tools==1.14.0a3", + "crewai-tools==1.14.0a4", ] embeddings = [ "tiktoken~=0.8.0" diff --git a/lib/crewai/src/crewai/__init__.py b/lib/crewai/src/crewai/__init__.py index 8a7d6dd3f..3df431554 100644 --- a/lib/crewai/src/crewai/__init__.py +++ b/lib/crewai/src/crewai/__init__.py @@ -46,7 +46,7 @@ def _suppress_pydantic_deprecation_warnings() -> None: _suppress_pydantic_deprecation_warnings() -__version__ = "1.14.0a3" +__version__ = "1.14.0a4" _telemetry_submitted = False diff --git a/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml b/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml index f48c68b3d..f2f9481be 100644 --- a/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml +++ b/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml @@ -5,7 +5,7 @@ description = "{{name}} using crewAI" authors = [{ name = "Your Name", email = "you@example.com" }] requires-python = ">=3.10,<3.14" dependencies = [ - "crewai[tools]==1.14.0a3" + "crewai[tools]==1.14.0a4" ] [project.scripts] diff --git a/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml b/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml index 59c0f7c91..348e13f1b 100644 --- a/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml +++ b/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml @@ -5,7 +5,7 @@ description = "{{name}} using crewAI" authors = [{ name = "Your 
Name", email = "you@example.com" }] requires-python = ">=3.10,<3.14" dependencies = [ - "crewai[tools]==1.14.0a3" + "crewai[tools]==1.14.0a4" ] [project.scripts] diff --git a/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml b/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml index 7d986532e..43410c18f 100644 --- a/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml +++ b/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml @@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}" readme = "README.md" requires-python = ">=3.10,<3.14" dependencies = [ - "crewai[tools]==1.14.0a3" + "crewai[tools]==1.14.0a4" ] [tool.crewai] diff --git a/lib/devtools/src/crewai_devtools/__init__.py b/lib/devtools/src/crewai_devtools/__init__.py index 8fa56e938..790ab4d18 100644 --- a/lib/devtools/src/crewai_devtools/__init__.py +++ b/lib/devtools/src/crewai_devtools/__init__.py @@ -1,3 +1,3 @@ """CrewAI development tools.""" -__version__ = "1.14.0a3" +__version__ = "1.14.0a4" diff --git a/uv.lock b/uv.lock index 66b886731..2f0922173 100644 --- a/uv.lock +++ b/uv.lock @@ -13,7 +13,7 @@ resolution-markers = [ ] [options] -exclude-newer = "2026-04-03T16:45:28.209407Z" +exclude-newer = "2026-04-04T15:11:41.651093Z" exclude-newer-span = "P3D" [manifest] @@ -1400,7 +1400,6 @@ source = { editable = "lib/crewai-tools" } dependencies = [ { name = "beautifulsoup4" }, { name = "crewai" }, - { name = "docker" }, { name = "pymupdf" }, { name = "python-docx" }, { name = "pytube" }, @@ -1537,7 +1536,6 @@ requires-dist = [ { name = "crewai", editable = "lib/crewai" }, { name = "cryptography", marker = "extra == 'snowflake'", specifier = ">=43.0.3" }, { name = "databricks-sdk", marker = "extra == 'databricks-sdk'", specifier = ">=0.46.0" }, - { name = "docker", specifier = "~=7.1.0" }, { name = "exa-py", marker = "extra == 'exa-py'", specifier = ">=1.8.7" }, { name = "firecrawl-py", marker = "extra == 'firecrawl-py'", specifier = ">=1.8.0" }, { name = "gitpython", 
marker = "extra == 'github'", specifier = ">=3.1.41,<4" }, @@ -1820,20 +1818,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ba/5a/18ad964b0086c6e62e2e7500f7edc89e3faa45033c71c1893d34eed2b2de/dnspython-2.8.0-py3-none-any.whl", hash = "sha256:01d9bbc4a2d76bf0db7c1f729812ded6d912bd318d3b1cf81d30c0f845dbf3af", size = 331094, upload-time = "2025-09-07T18:57:58.071Z" }, ] -[[package]] -name = "docker" -version = "7.1.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pywin32", marker = "sys_platform == 'win32'" }, - { name = "requests" }, - { name = "urllib3" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834, upload-time = "2024-05-23T11:13:57.216Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774, upload-time = "2024-05-23T11:13:55.01Z" }, -] - [[package]] name = "docling" version = "2.75.0" From 25e7ca03c4618b5e3a8e2c999ee2952107ff67e5 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Tue, 7 Apr 2026 23:29:21 +0800 Subject: [PATCH 10/21] docs: update changelog and version for v1.14.0a4 --- docs/ar/changelog.mdx | 33 +++++++++++++++++++++++++++++++++ docs/en/changelog.mdx | 33 +++++++++++++++++++++++++++++++++ docs/ko/changelog.mdx | 33 +++++++++++++++++++++++++++++++++ docs/pt-BR/changelog.mdx | 33 +++++++++++++++++++++++++++++++++ 4 files changed, 132 insertions(+) diff --git a/docs/ar/changelog.mdx b/docs/ar/changelog.mdx index 973f844a5..5f5482eb7 100644 --- a/docs/ar/changelog.mdx +++ b/docs/ar/changelog.mdx @@ -4,6 +4,39 @@ description: "تحديثات المنتج والتحسينات وإصلاحات icon: "clock" mode: "wide" --- + + ## 
v1.14.0a4 + + [عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0a4) + + ## ما الذي تغير + + ### الميزات + - إضافة guardrail_type و name لتمييز الآثار + - إضافة SqliteProvider لتخزين نقاط التحقق + - إضافة CheckpointConfig للتخزين التلقائي لنقاط التحقق + - تنفيذ نقاط التحقق لحالة التشغيل، نظام الأحداث، وإعادة هيكلة المنفذ + + ### إصلاحات الأخطاء + - استبعاد متجهات التضمين من تسلسل الذاكرة لتوفير الرموز + - رفع litellm إلى >=1.83.0 لمعالجة CVE-2026-35030 + + ### الوثائق + - تحديث أدلة البدء السريع والتثبيت لتحسين الوضوح + - إضافة قسم مقدمي التخزين وتصدير JsonProvider + + ### الأداء + - استخدام JSONB لعمود بيانات نقاط التحقق + + ### إعادة الهيكلة + - إزالة CodeInterpreterTool وإهمال معلمات تنفيذ الكود + + ## المساهمون + + @alex-clawd, @github-actions[bot], @greysonlalonde, @joaomdmoura, @lorenzejay, @lucasgomide + + + ## v1.14.0a3 diff --git a/docs/en/changelog.mdx b/docs/en/changelog.mdx index 53abe1587..b2ab728a7 100644 --- a/docs/en/changelog.mdx +++ b/docs/en/changelog.mdx @@ -4,6 +4,39 @@ description: "Product updates, improvements, and bug fixes for CrewAI" icon: "clock" mode: "wide" --- + + ## v1.14.0a4 + + [View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0a4) + + ## What's Changed + + ### Features + - Add guardrail_type and name to distinguish traces + - Add SqliteProvider for checkpoint storage + - Add CheckpointConfig for automatic checkpointing + - Implement runtime state checkpointing, event system, and executor refactor + + ### Bug Fixes + - Exclude embedding vectors from memory serialization to save tokens + - Bump litellm to >=1.83.0 to address CVE-2026-35030 + + ### Documentation + - Update quickstart and installation guides for improved clarity + - Add storage providers section and export JsonProvider + + ### Performance + - Use JSONB for checkpoint data column + + ### Refactoring + - Remove CodeInterpreterTool and deprecate code execution params + + ## Contributors + + @alex-clawd, 
@github-actions[bot], @greysonlalonde, @joaomdmoura, @lorenzejay, @lucasgomide + + + ## v1.14.0a3 diff --git a/docs/ko/changelog.mdx b/docs/ko/changelog.mdx index e5b364852..5c3a98abf 100644 --- a/docs/ko/changelog.mdx +++ b/docs/ko/changelog.mdx @@ -4,6 +4,39 @@ description: "CrewAI의 제품 업데이트, 개선 사항 및 버그 수정" icon: "clock" mode: "wide" --- + + ## v1.14.0a4 + + [GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0a4) + + ## 변경 사항 + + ### 기능 + - 추적을 구분하기 위해 guardrail_type 및 이름 추가 + - 체크포인트 저장을 위한 SqliteProvider 추가 + - 자동 체크포인트 생성을 위한 CheckpointConfig 추가 + - 런타임 상태 체크포인트, 이벤트 시스템 및 실행기 리팩토링 구현 + + ### 버그 수정 + - 토큰 절약을 위해 메모리 직렬화에서 임베딩 벡터 제외 + - CVE-2026-35030 문제를 해결하기 위해 litellm을 >=1.83.0으로 업데이트 + + ### 문서 + - 명확성을 개선하기 위해 빠른 시작 및 설치 가이드 업데이트 + - 저장소 제공자 섹션 추가 및 JsonProvider 내보내기 + + ### 성능 + - 체크포인트 데이터 열에 JSONB 사용 + + ### 리팩토링 + - CodeInterpreterTool 제거 및 코드 실행 매개변수 사용 중단 + + ## 기여자 + + @alex-clawd, @github-actions[bot], @greysonlalonde, @joaomdmoura, @lorenzejay, @lucasgomide + + + ## v1.14.0a3 diff --git a/docs/pt-BR/changelog.mdx b/docs/pt-BR/changelog.mdx index ae5252560..b6cd3aa42 100644 --- a/docs/pt-BR/changelog.mdx +++ b/docs/pt-BR/changelog.mdx @@ -4,6 +4,39 @@ description: "Atualizações de produto, melhorias e correções do CrewAI" icon: "clock" mode: "wide" --- + + ## v1.14.0a4 + + [Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0a4) + + ## O que Mudou + + ### Recursos + - Adicionar guardrail_type e nome para distinguir rastros + - Adicionar SqliteProvider para armazenamento de checkpoints + - Adicionar CheckpointConfig para checkpointing automático + - Implementar checkpointing de estado em tempo de execução, sistema de eventos e refatoração do executor + + ### Correções de Bugs + - Excluir vetores de incorporação da serialização de memória para economizar tokens + - Atualizar litellm para >=1.83.0 para resolver CVE-2026-35030 + + ### Documentação + - Atualizar guias de início rápido e instalação para melhor 
clareza + - Adicionar seção de provedores de armazenamento e exportar JsonProvider + + ### Desempenho + - Usar JSONB para a coluna de dados de checkpoint + + ### Refatoração + - Remover CodeInterpreterTool e descontinuar parâmetros de execução de código + + ## Contribuidores + + @alex-clawd, @github-actions[bot], @greysonlalonde, @joaomdmoura, @lorenzejay, @lucasgomide + + + ## v1.14.0a3 From 9325e2f6a4608a2869872c464c1e2bc8ddecfd4c Mon Sep 17 00:00:00 2001 From: alex-clawd Date: Tue, 7 Apr 2026 09:29:45 -0700 Subject: [PATCH 11/21] fix: add path and URL validation to RAG tools (#5310) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: add path and URL validation to RAG tools Add validation utilities to prevent unauthorized file reads and SSRF when RAG tools accept LLM-controlled paths/URLs at runtime. Changes: - New crewai_tools.utilities.safe_path module with validate_file_path(), validate_directory_path(), and validate_url() - File paths validated against base directory (defaults to cwd). Resolves symlinks and ../ traversal. Rejects escape attempts. - URLs validated: file:// blocked entirely. HTTP/HTTPS resolves DNS and blocks private/reserved IPs (10.x, 172.16-31.x, 192.168.x, 127.x, 169.254.x, 0.0.0.0, ::1, fc00::/7). - Validation applied in RagTool.add() — catches all RAG search tools (JSON, CSV, PDF, TXT, DOCX, MDX, Directory, etc.) - Removed file:// scheme support from DataTypes.from_content() - CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true env var for backward compat - 27 tests covering traversal, symlinks, private IPs, cloud metadata, IPv6, escape hatch, and valid paths/URLs * fix: validate path/URL keyword args in RagTool.add() The original patch validated positional *args but left all keyword arguments (path=, file_path=, directory_path=, url=, website=, github_url=, youtube_url=) unvalidated, providing a trivial bypass for both path-traversal and SSRF checks. 
Applies validate_file_path() to path/file_path/directory_path kwargs and validate_url() to url/website/github_url/youtube_url kwargs before they reach the adapter. Adds a regression-test file covering all eight kwarg vectors plus the two existing positional-arg checks. Co-Authored-By: Claude Sonnet 4.6 * fix: address CodeQL and review comments on RAG path/URL validation - Replace insecure tempfile.mktemp() with inline symlink target in test - Remove unused 'target' variable and unused tempfile import - Narrow broad except Exception: pass to only catch urlparse errors; validate_url ValueError now propagates instead of being silently swallowed - Fix ruff B904 (raise-without-from-inside-except) in safe_path.py - Fix ruff B007 (unused loop variable 'family') in safe_path.py - Use validate_directory_path in DirectorySearchTool.add() so the public utility is exercised in production code Co-Authored-By: Claude Sonnet 4.6 * style: fix ruff format + remaining lint issues * fix: resolve mypy type errors in RAG path/URL validation - Cast sockaddr[0] to str() to satisfy mypy (socket.getaddrinfo returns sockaddr where [0] is str but typed as str | int) - Remove now-unnecessary `type: ignore[assignment]` and `type: ignore[literal-required]` comments in rag_tool.py Co-Authored-By: Claude Sonnet 4.6 * fix: unroll dynamic TypedDict key loops to satisfy mypy literal-required Co-Authored-By: Claude Sonnet 4.6 * test: allow tmp paths in RAG data-type tests via CREWAI_TOOLS_ALLOW_UNSAFE_PATHS TemporaryDirectory creates files under /tmp/ which is outside CWD and is correctly blocked by the new path validation. These tests exercise data-type handling, not security, so add an autouse fixture that sets CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true for the whole file. Path/URL security is covered by test_rag_tool_path_validation.py. 
Co-Authored-By: Claude Sonnet 4.6 * test: allow tmp paths in search-tool and rag_tool tests via CREWAI_TOOLS_ALLOW_UNSAFE_PATHS test_search_tools.py has tests for TXTSearchTool, CSVSearchTool, MDXSearchTool, JSONSearchTool, and DirectorySearchTool that create files under /tmp/ via tempfile, which is outside CWD and correctly blocked by the new path validation. rag_tool_test.py has one test that calls tool.add() with a TemporaryDirectory path. Add the same autouse allow_tmp_paths fixture used in test_rag_tool_add_data_type.py. Security is covered separately by test_rag_tool_path_validation.py. Co-Authored-By: Claude Sonnet 4.6 * chore: update tool specifications * docs: document CodeInterpreterTool removal and RAG path/URL validation Co-Authored-By: Claude Sonnet 4.6 * fix: address three review comments on path/URL validation - safe_path._is_private_or_reserved: after unwrapping IPv4-mapped IPv6 to IPv4, only check against IPv4 networks to avoid TypeError when comparing an IPv4Address against IPv6Network objects. - safe_path.validate_file_path: handle filesystem-root base_dir ('/') by not appending os.sep when the base already ends with a separator, preventing the '//'-prefix bug. - rag_tool.add: path-detection heuristic now checks for both '/' and os.sep so forward-slash paths are caught on Windows as well as Unix. 
Co-Authored-By: Claude Sonnet 4.6 * fix: remove unused _BLOCKED_NETWORKS variable after IPv4/IPv6 split * chore: update tool specifications --------- Co-authored-by: Claude Sonnet 4.6 Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- docs/ar/concepts/agents.mdx | 20 +- docs/ar/tools/ai-ml/codeinterpretertool.mdx | 4 + docs/ar/tools/file-document/csvsearchtool.mdx | 16 ++ .../file-document/directorysearchtool.mdx | 12 + .../ar/tools/file-document/jsonsearchtool.mdx | 16 ++ docs/ar/tools/file-document/pdfsearchtool.mdx | 16 ++ docs/en/concepts/agents.mdx | 20 +- docs/en/tools/ai-ml/codeinterpretertool.mdx | 4 + docs/en/tools/file-document/csvsearchtool.mdx | 16 ++ .../file-document/directorysearchtool.mdx | 12 + .../en/tools/file-document/jsonsearchtool.mdx | 16 ++ docs/en/tools/file-document/pdfsearchtool.mdx | 16 ++ docs/ko/concepts/agents.mdx | 21 +- docs/ko/tools/ai-ml/codeinterpretertool.mdx | 4 + docs/ko/tools/file-document/csvsearchtool.mdx | 16 ++ .../file-document/directorysearchtool.mdx | 12 + .../ko/tools/file-document/jsonsearchtool.mdx | 16 ++ docs/ko/tools/file-document/pdfsearchtool.mdx | 16 ++ docs/pt-BR/concepts/agents.mdx | 21 +- .../pt-BR/tools/ai-ml/codeinterpretertool.mdx | 4 + .../tools/file-document/csvsearchtool.mdx | 16 ++ .../file-document/directorysearchtool.mdx | 12 + .../tools/file-document/jsonsearchtool.mdx | 16 ++ .../tools/file-document/pdfsearchtool.mdx | 18 +- .../src/crewai_tools/rag/data_types.py | 2 +- .../directory_search_tool.py | 2 + .../src/crewai_tools/tools/rag/rag_tool.py | 79 ++++++- .../src/crewai_tools/utilities/__init__.py | 0 .../src/crewai_tools/utilities/safe_path.py | 205 ++++++++++++++++++ .../tests/tools/rag/rag_tool_test.py | 11 + .../tools/rag/test_rag_tool_add_data_type.py | 9 + .../rag/test_rag_tool_path_validation.py | 80 +++++++ .../tests/tools/test_search_tools.py | 9 + lib/crewai-tools/tests/utilities/__init__.py | 0 .../tests/utilities/test_safe_path.py | 170 
+++++++++++++++ lib/crewai/src/crewai/cli/cli.py | 2 - lib/crewai/src/crewai/tasks/llm_guardrail.py | 2 +- 37 files changed, 857 insertions(+), 54 deletions(-) create mode 100644 lib/crewai-tools/src/crewai_tools/utilities/__init__.py create mode 100644 lib/crewai-tools/src/crewai_tools/utilities/safe_path.py create mode 100644 lib/crewai-tools/tests/tools/rag/test_rag_tool_path_validation.py create mode 100644 lib/crewai-tools/tests/utilities/__init__.py create mode 100644 lib/crewai-tools/tests/utilities/test_safe_path.py diff --git a/docs/ar/concepts/agents.mdx b/docs/ar/concepts/agents.mdx index fe11b2545..7ae5c668c 100644 --- a/docs/ar/concepts/agents.mdx +++ b/docs/ar/concepts/agents.mdx @@ -250,16 +250,12 @@ analysis_agent = Agent( #### تنفيذ الكود -- `allow_code_execution`: يجب أن يكون True لتشغيل الكود -- `code_execution_mode`: - - `"safe"`: يستخدم Docker (موصى به للإنتاج) - - `"unsafe"`: تنفيذ مباشر (استخدم فقط في بيئات موثوقة) + + `allow_code_execution` و`code_execution_mode` مهجوران. تمت إزالة `CodeInterpreterTool` من `crewai-tools`. استخدم خدمة بيئة معزولة مخصصة مثل [E2B](https://e2b.dev) أو [Modal](https://modal.com) لتنفيذ الكود بأمان. + - - يشغّل هذا صورة Docker افتراضية. إذا أردت تهيئة صورة Docker، - راجع أداة Code Interpreter في قسم الأدوات. أضف أداة - مفسر الكود كأداة في معامل أداة الوكيل. - +- `allow_code_execution` _(مهجور)_: كان يُمكّن تنفيذ الكود المدمج عبر `CodeInterpreterTool`. +- `code_execution_mode` _(مهجور)_: كان يتحكم في وضع التنفيذ (`"safe"` لـ Docker، `"unsafe"` للتنفيذ المباشر). #### الميزات المتقدمة @@ -332,9 +328,9 @@ print(result.raw) ### الأمان وتنفيذ الكود -- عند استخدام `allow_code_execution`، كن حذرًا مع مدخلات المستخدم وتحقق منها دائمًا -- استخدم `code_execution_mode: "safe"` (Docker) في بيئات الإنتاج -- فكّر في تعيين حدود `max_execution_time` مناسبة لمنع الحلقات اللانهائية + + `allow_code_execution` و`code_execution_mode` مهجوران وتمت إزالة `CodeInterpreterTool`. 
استخدم خدمة بيئة معزولة مخصصة مثل [E2B](https://e2b.dev) أو [Modal](https://modal.com) لتنفيذ الكود بأمان. + ### تحسين الأداء diff --git a/docs/ar/tools/ai-ml/codeinterpretertool.mdx b/docs/ar/tools/ai-ml/codeinterpretertool.mdx index dbcf016eb..bbaea809b 100644 --- a/docs/ar/tools/ai-ml/codeinterpretertool.mdx +++ b/docs/ar/tools/ai-ml/codeinterpretertool.mdx @@ -7,6 +7,10 @@ mode: "wide" # `CodeInterpreterTool` + + **مهجور:** تمت إزالة `CodeInterpreterTool` من `crewai-tools`. كما أن معاملَي `allow_code_execution` و`code_execution_mode` على `Agent` أصبحا مهجورَين. استخدم خدمة بيئة معزولة مخصصة — [E2B](https://e2b.dev) أو [Modal](https://modal.com) — لتنفيذ الكود بشكل آمن ومعزول. + + ## الوصف تمكّن `CodeInterpreterTool` وكلاء CrewAI من تنفيذ كود Python 3 الذي يولّدونه بشكل مستقل. هذه الوظيفة ذات قيمة خاصة لأنها تتيح للوكلاء إنشاء الكود وتنفيذه والحصول على النتائج واستخدام تلك المعلومات لاتخاذ القرارات والإجراءات اللاحقة. diff --git a/docs/ar/tools/file-document/csvsearchtool.mdx b/docs/ar/tools/file-document/csvsearchtool.mdx index f9d5d7bf8..9e4e89658 100644 --- a/docs/ar/tools/file-document/csvsearchtool.mdx +++ b/docs/ar/tools/file-document/csvsearchtool.mdx @@ -74,3 +74,19 @@ tool = CSVSearchTool( } ) ``` + +## الأمان + +### التحقق من صحة المسارات + +يتم التحقق من مسارات الملفات المقدمة لهذه الأداة مقابل مجلد العمل الحالي. يتم رفض المسارات التي تحل خارج مجلد العمل وإطلاق `ValueError`. + +للسماح بالمسارات خارج مجلد العمل (مثلاً في الاختبارات أو خطوط الأنابيب الموثوقة)، عيّن متغير البيئة التالي: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### التحقق من صحة الروابط + +يتم التحقق من مدخلات الروابط: يتم حظر مخطط `file://` والطلبات التي تستهدف نطاقات IP الخاصة أو المحجوزة لمنع هجمات تزوير الطلبات من جانب الخادم (SSRF). 
diff --git a/docs/ar/tools/file-document/directorysearchtool.mdx b/docs/ar/tools/file-document/directorysearchtool.mdx index 2e5595865..577836ad9 100644 --- a/docs/ar/tools/file-document/directorysearchtool.mdx +++ b/docs/ar/tools/file-document/directorysearchtool.mdx @@ -68,3 +68,15 @@ tool = DirectorySearchTool( } ) ``` + +## الأمان + +### التحقق من صحة المسارات + +يتم التحقق من مسارات المجلدات المقدمة لهذه الأداة مقابل مجلد العمل الحالي. يتم رفض المسارات التي تحل خارج مجلد العمل وإطلاق `ValueError`. + +للسماح بالمسارات خارج مجلد العمل (مثلاً في الاختبارات أو خطوط الأنابيب الموثوقة)، عيّن متغير البيئة التالي: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` diff --git a/docs/ar/tools/file-document/jsonsearchtool.mdx b/docs/ar/tools/file-document/jsonsearchtool.mdx index 62ef99081..53aebacea 100644 --- a/docs/ar/tools/file-document/jsonsearchtool.mdx +++ b/docs/ar/tools/file-document/jsonsearchtool.mdx @@ -73,3 +73,19 @@ tool = JSONSearchTool( } ) ``` + +## الأمان + +### التحقق من صحة المسارات + +يتم التحقق من مسارات الملفات المقدمة لهذه الأداة مقابل مجلد العمل الحالي. يتم رفض المسارات التي تحل خارج مجلد العمل وإطلاق `ValueError`. + +للسماح بالمسارات خارج مجلد العمل (مثلاً في الاختبارات أو خطوط الأنابيب الموثوقة)، عيّن متغير البيئة التالي: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### التحقق من صحة الروابط + +يتم التحقق من مدخلات الروابط: يتم حظر مخطط `file://` والطلبات التي تستهدف نطاقات IP الخاصة أو المحجوزة لمنع هجمات تزوير الطلبات من جانب الخادم (SSRF). diff --git a/docs/ar/tools/file-document/pdfsearchtool.mdx b/docs/ar/tools/file-document/pdfsearchtool.mdx index 86e0272ad..96d4b98ba 100644 --- a/docs/ar/tools/file-document/pdfsearchtool.mdx +++ b/docs/ar/tools/file-document/pdfsearchtool.mdx @@ -105,3 +105,19 @@ tool = PDFSearchTool( } ) ``` + +## الأمان + +### التحقق من صحة المسارات + +يتم التحقق من مسارات الملفات المقدمة لهذه الأداة مقابل مجلد العمل الحالي. يتم رفض المسارات التي تحل خارج مجلد العمل وإطلاق `ValueError`. 
+ +للسماح بالمسارات خارج مجلد العمل (مثلاً في الاختبارات أو خطوط الأنابيب الموثوقة)، عيّن متغير البيئة التالي: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### التحقق من صحة الروابط + +يتم التحقق من مدخلات الروابط: يتم حظر مخطط `file://` والطلبات التي تستهدف نطاقات IP الخاصة أو المحجوزة لمنع هجمات تزوير الطلبات من جانب الخادم (SSRF). diff --git a/docs/en/concepts/agents.mdx b/docs/en/concepts/agents.mdx index 5240c5a9f..ffd1a7ec6 100644 --- a/docs/en/concepts/agents.mdx +++ b/docs/en/concepts/agents.mdx @@ -308,16 +308,12 @@ multimodal_agent = Agent( #### Code Execution -- `allow_code_execution`: Must be True to run code -- `code_execution_mode`: - - `"safe"`: Uses Docker (recommended for production) - - `"unsafe"`: Direct execution (use only in trusted environments) + + `allow_code_execution` and `code_execution_mode` are deprecated. `CodeInterpreterTool` has been removed from `crewai-tools`. Use a dedicated sandbox service such as [E2B](https://e2b.dev) or [Modal](https://modal.com) for secure code execution. + - - This runs a default Docker image. If you want to configure the docker image, - the checkout the Code Interpreter Tool in the tools section. Add the code - interpreter tool as a tool in the agent as a tool parameter. - +- `allow_code_execution` _(deprecated)_: Previously enabled built-in code execution via `CodeInterpreterTool`. +- `code_execution_mode` _(deprecated)_: Previously controlled execution mode (`"safe"` for Docker, `"unsafe"` for direct execution). #### Advanced Features @@ -667,9 +663,9 @@ asyncio.run(main()) ### Security and Code Execution -- When using `allow_code_execution`, be cautious with user input and always validate it -- Use `code_execution_mode: "safe"` (Docker) in production environments -- Consider setting appropriate `max_execution_time` limits to prevent infinite loops + + `allow_code_execution` and `code_execution_mode` are deprecated and `CodeInterpreterTool` has been removed. 
Use a dedicated sandbox service such as [E2B](https://e2b.dev) or [Modal](https://modal.com) for secure code execution. + ### Performance Optimization diff --git a/docs/en/tools/ai-ml/codeinterpretertool.mdx b/docs/en/tools/ai-ml/codeinterpretertool.mdx index 67d371178..660c98a60 100644 --- a/docs/en/tools/ai-ml/codeinterpretertool.mdx +++ b/docs/en/tools/ai-ml/codeinterpretertool.mdx @@ -7,6 +7,10 @@ mode: "wide" # `CodeInterpreterTool` + + **Deprecated:** `CodeInterpreterTool` has been removed from `crewai-tools`. The `allow_code_execution` and `code_execution_mode` parameters on `Agent` are also deprecated. Use a dedicated sandbox service — [E2B](https://e2b.dev) or [Modal](https://modal.com) — for secure, isolated code execution. + + ## Description The `CodeInterpreterTool` enables CrewAI agents to execute Python 3 code that they generate autonomously. This functionality is particularly valuable as it allows agents to create code, execute it, obtain the results, and utilize that information to inform subsequent decisions and actions. diff --git a/docs/en/tools/file-document/csvsearchtool.mdx b/docs/en/tools/file-document/csvsearchtool.mdx index c20f8ec74..ebcfad583 100644 --- a/docs/en/tools/file-document/csvsearchtool.mdx +++ b/docs/en/tools/file-document/csvsearchtool.mdx @@ -75,4 +75,20 @@ tool = CSVSearchTool( }, } ) + +## Security + +### Path Validation + +File paths provided to this tool are validated against the current working directory. Paths that resolve outside the working directory are rejected with a `ValueError`. + +To allow paths outside the working directory (for example, in tests or trusted pipelines), set the environment variable: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### URL Validation + +URL inputs are validated: `file://` URIs and requests targeting private or reserved IP ranges are blocked to prevent server-side request forgery (SSRF) attacks. 
``` \ No newline at end of file diff --git a/docs/en/tools/file-document/directorysearchtool.mdx b/docs/en/tools/file-document/directorysearchtool.mdx index 9efd2e910..c6bd537e4 100644 --- a/docs/en/tools/file-document/directorysearchtool.mdx +++ b/docs/en/tools/file-document/directorysearchtool.mdx @@ -67,4 +67,16 @@ tool = DirectorySearchTool( }, } ) -``` \ No newline at end of file +``` + +## Security + +### Path Validation + +Directory paths provided to this tool are validated against the current working directory. Paths that resolve outside the working directory are rejected with a `ValueError`. + +To allow paths outside the working directory (for example, in tests or trusted pipelines), set the environment variable: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` diff --git a/docs/en/tools/file-document/jsonsearchtool.mdx b/docs/en/tools/file-document/jsonsearchtool.mdx index 7b1737faa..2ef8e95b4 100644 --- a/docs/en/tools/file-document/jsonsearchtool.mdx +++ b/docs/en/tools/file-document/jsonsearchtool.mdx @@ -74,3 +74,19 @@ tool = JSONSearchTool( } ) ``` + +## Security + +### Path Validation + +File paths provided to this tool are validated against the current working directory. Paths that resolve outside the working directory are rejected with a `ValueError`. + +To allow paths outside the working directory (for example, in tests or trusted pipelines), set the environment variable: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### URL Validation + +URL inputs are validated: `file://` URIs and requests targeting private or reserved IP ranges are blocked to prevent server-side request forgery (SSRF) attacks.
diff --git a/docs/en/tools/file-document/pdfsearchtool.mdx b/docs/en/tools/file-document/pdfsearchtool.mdx index 32e05669e..d8c812f2d 100644 --- a/docs/en/tools/file-document/pdfsearchtool.mdx +++ b/docs/en/tools/file-document/pdfsearchtool.mdx @@ -105,4 +105,20 @@ tool = PDFSearchTool( }, } ) -``` \ No newline at end of file +``` + +## Security + +### Path Validation + +File paths provided to this tool are validated against the current working directory. Paths that resolve outside the working directory are rejected with a `ValueError`. + +To allow paths outside the working directory (for example, in tests or trusted pipelines), set the environment variable: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### URL Validation + +URL inputs are validated: `file://` URIs and requests targeting private or reserved IP ranges are blocked to prevent server-side request forgery (SSRF) attacks. diff --git a/docs/ko/concepts/agents.mdx b/docs/ko/concepts/agents.mdx index 21bebbb82..09d3431fc 100644 --- a/docs/ko/concepts/agents.mdx +++ b/docs/ko/concepts/agents.mdx @@ -291,15 +291,13 @@ multimodal_agent = Agent( - `max_retry_limit`: 오류 발생 시 재시도 횟수 #### 코드 실행 -- `allow_code_execution`: 코드를 실행하려면 True여야 합니다 -- `code_execution_mode`: - - `"safe"`: Docker를 사용합니다 (프로덕션에 권장) - - `"unsafe"`: 직접 실행 (신뢰할 수 있는 환경에서만 사용) - - 이 옵션은 기본 Docker 이미지를 실행합니다. Docker 이미지를 구성하려면 도구 섹션에 있는 Code Interpreter Tool을 확인하십시오. - Code Interpreter Tool을 에이전트의 도구 파라미터로 추가하십시오. - + + `allow_code_execution` 및 `code_execution_mode`는 더 이상 사용되지 않습니다. `CodeInterpreterTool`이 `crewai-tools`에서 제거되었습니다. 안전한 코드 실행을 위해 [E2B](https://e2b.dev) 또는 [Modal](https://modal.com)과 같은 전용 샌드박스 서비스를 사용하세요. + + +- `allow_code_execution` _(지원 중단)_: 이전에 `CodeInterpreterTool`을 통한 내장 코드 실행을 활성화했습니다. +- `code_execution_mode` _(지원 중단)_: 이전에 실행 모드를 제어했습니다 (Docker의 경우 `"safe"`, 직접 실행의 경우 `"unsafe"`).
#### 고급 기능 - `multimodal`: 텍스트와 시각적 콘텐츠 처리를 위한 멀티모달 기능 활성화 @@ -627,9 +625,10 @@ asyncio.run(main()) ## 중요한 고려사항 및 모범 사례 ### 보안 및 코드 실행 -- `allow_code_execution`을 사용할 때는 사용자 입력에 주의하고 항상 입력 값을 검증하세요 -- 운영 환경에서는 `code_execution_mode: "safe"`(Docker)를 사용하세요 -- 무한 루프를 방지하기 위해 적절한 `max_execution_time` 제한을 설정하는 것을 고려하세요 + + + `allow_code_execution` 및 `code_execution_mode`는 더 이상 사용되지 않으며 `CodeInterpreterTool`이 제거되었습니다. 안전한 코드 실행을 위해 [E2B](https://e2b.dev) 또는 [Modal](https://modal.com)과 같은 전용 샌드박스 서비스를 사용하세요. + ### 성능 최적화 - `respect_context_window: true`를 사용하여 토큰 제한 문제를 방지하세요. diff --git a/docs/ko/tools/ai-ml/codeinterpretertool.mdx b/docs/ko/tools/ai-ml/codeinterpretertool.mdx index f5053d216..1b2ec234e 100644 --- a/docs/ko/tools/ai-ml/codeinterpretertool.mdx +++ b/docs/ko/tools/ai-ml/codeinterpretertool.mdx @@ -7,6 +7,10 @@ mode: "wide" # `CodeInterpreterTool` + + **지원 중단:** `CodeInterpreterTool`이 `crewai-tools`에서 제거되었습니다. `Agent`의 `allow_code_execution` 및 `code_execution_mode` 파라미터도 더 이상 사용되지 않습니다. 안전하고 격리된 코드 실행을 위해 전용 샌드박스 서비스 — [E2B](https://e2b.dev) 또는 [Modal](https://modal.com) — 을 사용하세요. + + ## 설명 `CodeInterpreterTool`은 CrewAI 에이전트가 자율적으로 생성한 Python 3 코드를 실행할 수 있도록 합니다. 이 기능은 에이전트가 코드를 생성하고, 실행하며, 결과를 얻고, 그 정보를 활용하여 이후의 결정과 행동에 반영할 수 있다는 점에서 특히 유용합니다. diff --git a/docs/ko/tools/file-document/csvsearchtool.mdx b/docs/ko/tools/file-document/csvsearchtool.mdx index e962b11e1..99de2cdda 100644 --- a/docs/ko/tools/file-document/csvsearchtool.mdx +++ b/docs/ko/tools/file-document/csvsearchtool.mdx @@ -76,3 +76,19 @@ tool = CSVSearchTool( } ) ``` + +## 보안 + +### 경로 유효성 검사 + +이 도구에 제공되는 파일 경로는 현재 작업 디렉터리에 대해 검증됩니다. 작업 디렉터리 외부로 확인되는 경로는 `ValueError`로 거부됩니다. + +작업 디렉터리 외부의 경로를 허용하려면 (예: 테스트 또는 신뢰할 수 있는 파이프라인), 다음 환경 변수를 설정하세요: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### URL 유효성 검사 + +URL 입력도 검증됩니다: `file://` URI와 사설 또는 예약된 IP 범위를 대상으로 하는 요청은 서버 측 요청 위조(SSRF) 공격을 방지하기 위해 차단됩니다. 
diff --git a/docs/ko/tools/file-document/directorysearchtool.mdx b/docs/ko/tools/file-document/directorysearchtool.mdx index 5a46e53b7..4f9becef5 100644 --- a/docs/ko/tools/file-document/directorysearchtool.mdx +++ b/docs/ko/tools/file-document/directorysearchtool.mdx @@ -68,3 +68,15 @@ tool = DirectorySearchTool( } ) ``` + +## 보안 + +### 경로 유효성 검사 + +이 도구에 제공되는 디렉터리 경로는 현재 작업 디렉터리에 대해 검증됩니다. 작업 디렉터리 외부로 확인되는 경로는 `ValueError`로 거부됩니다. + +작업 디렉터리 외부의 경로를 허용하려면 (예: 테스트 또는 신뢰할 수 있는 파이프라인), 다음 환경 변수를 설정하세요: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` diff --git a/docs/ko/tools/file-document/jsonsearchtool.mdx b/docs/ko/tools/file-document/jsonsearchtool.mdx index be0a6f134..3b4a60931 100644 --- a/docs/ko/tools/file-document/jsonsearchtool.mdx +++ b/docs/ko/tools/file-document/jsonsearchtool.mdx @@ -71,3 +71,19 @@ tool = JSONSearchTool( } ) ``` + +## 보안 + +### 경로 유효성 검사 + +이 도구에 제공되는 파일 경로는 현재 작업 디렉터리에 대해 검증됩니다. 작업 디렉터리 외부로 확인되는 경로는 `ValueError`로 거부됩니다. + +작업 디렉터리 외부의 경로를 허용하려면 (예: 테스트 또는 신뢰할 수 있는 파이프라인), 다음 환경 변수를 설정하세요: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### URL 유효성 검사 + +URL 입력도 검증됩니다: `file://` URI와 사설 또는 예약된 IP 범위를 대상으로 하는 요청은 서버 측 요청 위조(SSRF) 공격을 방지하기 위해 차단됩니다. diff --git a/docs/ko/tools/file-document/pdfsearchtool.mdx b/docs/ko/tools/file-document/pdfsearchtool.mdx index 573ed4812..f9cf622d5 100644 --- a/docs/ko/tools/file-document/pdfsearchtool.mdx +++ b/docs/ko/tools/file-document/pdfsearchtool.mdx @@ -102,3 +102,19 @@ tool = PDFSearchTool( } ) ``` + +## 보안 + +### 경로 유효성 검사 + +이 도구에 제공되는 파일 경로는 현재 작업 디렉터리에 대해 검증됩니다. 작업 디렉터리 외부로 확인되는 경로는 `ValueError`로 거부됩니다. + +작업 디렉터리 외부의 경로를 허용하려면 (예: 테스트 또는 신뢰할 수 있는 파이프라인), 다음 환경 변수를 설정하세요: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### URL 유효성 검사 + +URL 입력도 검증됩니다: `file://` URI와 사설 또는 예약된 IP 범위를 대상으로 하는 요청은 서버 측 요청 위조(SSRF) 공격을 방지하기 위해 차단됩니다. 
diff --git a/docs/pt-BR/concepts/agents.mdx b/docs/pt-BR/concepts/agents.mdx index 383d501c6..69cb2e9d4 100644 --- a/docs/pt-BR/concepts/agents.mdx +++ b/docs/pt-BR/concepts/agents.mdx @@ -304,17 +304,12 @@ multimodal_agent = Agent( #### Execução de Código -- `allow_code_execution`: Deve ser True para permitir execução de código -- `code_execution_mode`: - - `"safe"`: Usa Docker (recomendado para produção) - - `"unsafe"`: Execução direta (apenas em ambientes confiáveis) + + `allow_code_execution` e `code_execution_mode` estão depreciados. O `CodeInterpreterTool` foi removido do `crewai-tools`. Use um serviço de sandbox dedicado como [E2B](https://e2b.dev) ou [Modal](https://modal.com) para execução segura de código. + - - Isso executa uma imagem Docker padrão. Se você deseja configurar a imagem - Docker, veja a ferramenta Code Interpreter na seção de ferramentas. Adicione a - ferramenta de interpretação de código como um parâmetro em ferramentas no - agente. - +- `allow_code_execution` _(depreciado)_: Anteriormente habilitava a execução de código embutida via `CodeInterpreterTool`. +- `code_execution_mode` _(depreciado)_: Anteriormente controlava o modo de execução (`"safe"` para Docker, `"unsafe"` para execução direta). #### Funcionalidades Avançadas @@ -565,9 +560,9 @@ agent = Agent( ### Segurança e Execução de Código -- Ao usar `allow_code_execution`, seja cauteloso com entradas do usuário e sempre as valide -- Use `code_execution_mode: "safe"` (Docker) em ambientes de produção -- Considere definir limites adequados de `max_execution_time` para evitar loops infinitos + + `allow_code_execution` e `code_execution_mode` estão depreciados e o `CodeInterpreterTool` foi removido. Use um serviço de sandbox dedicado como [E2B](https://e2b.dev) ou [Modal](https://modal.com) para execução segura de código. 
+ ### Otimização de Performance diff --git a/docs/pt-BR/tools/ai-ml/codeinterpretertool.mdx b/docs/pt-BR/tools/ai-ml/codeinterpretertool.mdx index 14c4fd51d..9b48a51e4 100644 --- a/docs/pt-BR/tools/ai-ml/codeinterpretertool.mdx +++ b/docs/pt-BR/tools/ai-ml/codeinterpretertool.mdx @@ -7,6 +7,10 @@ mode: "wide" # `CodeInterpreterTool` + + **Depreciado:** O `CodeInterpreterTool` foi removido do `crewai-tools`. Os parâmetros `allow_code_execution` e `code_execution_mode` do `Agent` também estão depreciados. Use um serviço de sandbox dedicado — [E2B](https://e2b.dev) ou [Modal](https://modal.com) — para execução de código segura e isolada. + + ## Descrição O `CodeInterpreterTool` permite que agentes CrewAI executem códigos Python 3 gerados autonomamente. Essa funcionalidade é particularmente valiosa, pois permite que os agentes criem códigos, os executem, obtenham os resultados e usem essas informações para orientar decisões e ações subsequentes. diff --git a/docs/pt-BR/tools/file-document/csvsearchtool.mdx b/docs/pt-BR/tools/file-document/csvsearchtool.mdx index a2ebd3af7..59a07b3ea 100644 --- a/docs/pt-BR/tools/file-document/csvsearchtool.mdx +++ b/docs/pt-BR/tools/file-document/csvsearchtool.mdx @@ -75,4 +75,20 @@ tool = CSVSearchTool( ), ) ) + +## Segurança + +### Validação de Caminhos + +Os caminhos de arquivo fornecidos a esta ferramenta são validados em relação ao diretório de trabalho atual. Caminhos que resolvem fora do diretório de trabalho são rejeitados com um `ValueError`. + +Para permitir caminhos fora do diretório de trabalho (por exemplo, em testes ou pipelines confiáveis), defina a variável de ambiente: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### Validação de URLs + +Entradas de URL também são validadas: URIs `file://` e requisições direcionadas a faixas de IP privadas ou reservadas são bloqueadas para prevenir ataques de falsificação de requisições do lado do servidor (SSRF). 
``` \ No newline at end of file diff --git a/docs/pt-BR/tools/file-document/directorysearchtool.mdx b/docs/pt-BR/tools/file-document/directorysearchtool.mdx index 4093bbc8e..50685ff58 100644 --- a/docs/pt-BR/tools/file-document/directorysearchtool.mdx +++ b/docs/pt-BR/tools/file-document/directorysearchtool.mdx @@ -67,4 +67,16 @@ tool = DirectorySearchTool( }, } ) +``` + +## Segurança + +### Validação de Caminhos + +Os caminhos de diretório fornecidos a esta ferramenta são validados em relação ao diretório de trabalho atual. Caminhos que resolvem fora do diretório de trabalho são rejeitados com um `ValueError`. + +Para permitir caminhos fora do diretório de trabalho (por exemplo, em testes ou pipelines confiáveis), defina a variável de ambiente: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true ``` \ No newline at end of file diff --git a/docs/pt-BR/tools/file-document/jsonsearchtool.mdx b/docs/pt-BR/tools/file-document/jsonsearchtool.mdx index 11b76044b..ec75920e5 100644 --- a/docs/pt-BR/tools/file-document/jsonsearchtool.mdx +++ b/docs/pt-BR/tools/file-document/jsonsearchtool.mdx @@ -73,4 +73,20 @@ tool = JSONSearchTool( }, } ) + +## Segurança + +### Validação de Caminhos + +Os caminhos de arquivo fornecidos a esta ferramenta são validados em relação ao diretório de trabalho atual. Caminhos que resolvem fora do diretório de trabalho são rejeitados com um `ValueError`. + +Para permitir caminhos fora do diretório de trabalho (por exemplo, em testes ou pipelines confiáveis), defina a variável de ambiente: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### Validação de URLs + +Entradas de URL também são validadas: URIs `file://` e requisições direcionadas a faixas de IP privadas ou reservadas são bloqueadas para prevenir ataques de falsificação de requisições do lado do servidor (SSRF). 
``` \ No newline at end of file diff --git a/docs/pt-BR/tools/file-document/pdfsearchtool.mdx b/docs/pt-BR/tools/file-document/pdfsearchtool.mdx index 83cac48bb..f547ec80a 100644 --- a/docs/pt-BR/tools/file-document/pdfsearchtool.mdx +++ b/docs/pt-BR/tools/file-document/pdfsearchtool.mdx @@ -101,4 +101,20 @@ tool = PDFSearchTool( }, } ) -``` \ No newline at end of file +``` + +## Segurança + +### Validação de Caminhos + +Os caminhos de arquivo fornecidos a esta ferramenta são validados em relação ao diretório de trabalho atual. Caminhos que resolvem fora do diretório de trabalho são rejeitados com um `ValueError`. + +Para permitir caminhos fora do diretório de trabalho (por exemplo, em testes ou pipelines confiáveis), defina a variável de ambiente: + +```shell +CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true +``` + +### Validação de URLs + +Entradas de URL também são validadas: URIs `file://` e requisições direcionadas a faixas de IP privadas ou reservadas são bloqueadas para prevenir ataques de falsificação de requisições do lado do servidor (SSRF). 
\ No newline at end of file diff --git a/lib/crewai-tools/src/crewai_tools/rag/data_types.py b/lib/crewai-tools/src/crewai_tools/rag/data_types.py index 09d519ce9..2ab62f20f 100644 --- a/lib/crewai-tools/src/crewai_tools/rag/data_types.py +++ b/lib/crewai-tools/src/crewai_tools/rag/data_types.py @@ -109,7 +109,7 @@ class DataTypes: if isinstance(content, str): try: url = urlparse(content) - is_url = bool(url.scheme and url.netloc) or url.scheme == "file" + is_url = bool(url.scheme in ("http", "https") and url.netloc) except Exception: # noqa: S110 pass diff --git a/lib/crewai-tools/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/lib/crewai-tools/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index d218188e7..f17c4699a 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -4,6 +4,7 @@ from pydantic import BaseModel, Field from crewai_tools.rag.data_types import DataType from crewai_tools.tools.rag.rag_tool import RagTool +from crewai_tools.utilities.safe_path import validate_directory_path class FixedDirectorySearchToolSchema(BaseModel): @@ -37,6 +38,7 @@ class DirectorySearchTool(RagTool): self._generate_description() def add(self, directory: str) -> None: # type: ignore[override] + validate_directory_path(directory) super().add(directory, data_type=DataType.DIRECTORY) def _run( # type: ignore[override] diff --git a/lib/crewai-tools/src/crewai_tools/tools/rag/rag_tool.py b/lib/crewai-tools/src/crewai_tools/tools/rag/rag_tool.py index 52fc903e9..eb7e9cefd 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/rag/rag_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/rag/rag_tool.py @@ -1,4 +1,5 @@ from abc import ABC, abstractmethod +import os from typing import Any, Literal, cast from crewai.rag.core.base_embeddings_callable import EmbeddingFunction @@ -246,7 +247,83 @@ class 
RagTool(BaseTool): # Auto-detect type from extension rag_tool.add("path/to/document.pdf") # auto-detects PDF """ - self.adapter.add(*args, **kwargs) + # Validate file paths and URLs before adding to prevent + # unauthorized file reads and SSRF. + from urllib.parse import urlparse + + from crewai_tools.utilities.safe_path import validate_file_path, validate_url + + def _check_url(value: str, label: str) -> None: + try: + validate_url(value) + except ValueError as e: + raise ValueError(f"Blocked unsafe {label}: {e}") from e + + def _check_path(value: str, label: str) -> None: + try: + validate_file_path(value) + except ValueError as e: + raise ValueError(f"Blocked unsafe {label}: {e}") from e + + validated_args: list[ContentItem] = [] + for arg in args: + source_ref = ( + str(arg.get("source", arg.get("content", ""))) + if isinstance(arg, dict) + else str(arg) + ) + + # Check if it's a URL — only catch urlparse-specific errors here; + # validate_url's ValueError must propagate so it is never silently bypassed. + try: + parsed = urlparse(source_ref) + except (ValueError, AttributeError): + parsed = None + + if parsed is not None and parsed.scheme in ("http", "https", "file"): + try: + validate_url(source_ref) + except ValueError as e: + raise ValueError(f"Blocked unsafe URL: {e}") from e + validated_args.append(arg) + continue + + # Check if it looks like a file path (not a plain text string). + # Check both os.sep (backslash on Windows) and "/" so that + # forward-slash paths like "sub/file.txt" are caught on all platforms. + if ( + os.path.sep in source_ref + or "/" in source_ref + or source_ref.startswith(".") + or os.path.isabs(source_ref) + ): + try: + validate_file_path(source_ref) + except ValueError as e: + raise ValueError(f"Blocked unsafe file path: {e}") from e + + validated_args.append(arg) + + # Validate keyword path/URL arguments — these are equally user-controlled + # and must not bypass the checks applied to positional args. 
+ if "path" in kwargs and kwargs.get("path") is not None: + _check_path(str(kwargs["path"]), "path") + if "file_path" in kwargs and kwargs.get("file_path") is not None: + _check_path(str(kwargs["file_path"]), "file_path") + + if "directory_path" in kwargs and kwargs.get("directory_path") is not None: + _check_path(str(kwargs["directory_path"]), "directory_path") + + if "url" in kwargs and kwargs.get("url") is not None: + _check_url(str(kwargs["url"]), "url") + if "website" in kwargs and kwargs.get("website") is not None: + _check_url(str(kwargs["website"]), "website") + if "github_url" in kwargs and kwargs.get("github_url") is not None: + _check_url(str(kwargs["github_url"]), "github_url") + if "youtube_url" in kwargs and kwargs.get("youtube_url") is not None: + _check_url(str(kwargs["youtube_url"]), "youtube_url") + + self.adapter.add(*validated_args, **kwargs) def _run( self, diff --git a/lib/crewai-tools/src/crewai_tools/utilities/__init__.py b/lib/crewai-tools/src/crewai_tools/utilities/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai-tools/src/crewai_tools/utilities/safe_path.py b/lib/crewai-tools/src/crewai_tools/utilities/safe_path.py new file mode 100644 index 000000000..4dde68e12 --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/utilities/safe_path.py @@ -0,0 +1,205 @@ +"""Path and URL validation utilities for crewai-tools. + +Provides validation for file paths and URLs to prevent unauthorized +file access and server-side request forgery (SSRF) when tools accept +user-controlled or LLM-controlled inputs at runtime. + +Set CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true to bypass validation (not +recommended for production). 
+""" + +from __future__ import annotations + +import ipaddress +import logging +import os +import socket +from urllib.parse import urlparse + + +logger = logging.getLogger(__name__) + +_UNSAFE_PATHS_ENV = "CREWAI_TOOLS_ALLOW_UNSAFE_PATHS" + + +def _is_escape_hatch_enabled() -> bool: + """Check if the unsafe paths escape hatch is enabled.""" + return os.environ.get(_UNSAFE_PATHS_ENV, "").lower() in ("true", "1", "yes") + + +# --------------------------------------------------------------------------- +# File path validation +# --------------------------------------------------------------------------- + + +def validate_file_path(path: str, base_dir: str | None = None) -> str: + """Validate that a file path is safe to read. + + Resolves symlinks and ``..`` components, then checks that the resolved + path falls within *base_dir* (defaults to the current working directory). + + Args: + path: The file path to validate. + base_dir: Allowed root directory. Defaults to ``os.getcwd()``. + + Returns: + The resolved, validated absolute path. + + Raises: + ValueError: If the path escapes the allowed directory. + """ + if _is_escape_hatch_enabled(): + logger.warning( + "%s is enabled — skipping file path validation for: %s", + _UNSAFE_PATHS_ENV, + path, + ) + return os.path.realpath(path) + + if base_dir is None: + base_dir = os.getcwd() + + resolved_base = os.path.realpath(base_dir) + resolved_path = os.path.realpath( + os.path.join(resolved_base, path) if not os.path.isabs(path) else path + ) + + # Ensure the resolved path is within the base directory. + # When resolved_base already ends with a separator (e.g. the filesystem + # root "/"), appending os.sep would double it ("//"), so use the base + # as-is in that case. 
+ prefix = resolved_base if resolved_base.endswith(os.sep) else resolved_base + os.sep + if not resolved_path.startswith(prefix) and resolved_path != resolved_base: + raise ValueError( + f"Path '{path}' resolves to '{resolved_path}' which is outside " + f"the allowed directory '{resolved_base}'. " + f"Set {_UNSAFE_PATHS_ENV}=true to bypass this check." + ) + + return resolved_path + + +def validate_directory_path(path: str, base_dir: str | None = None) -> str: + """Validate that a directory path is safe to read. + + Same as :func:`validate_file_path` but also checks that the path + is an existing directory. + + Args: + path: The directory path to validate. + base_dir: Allowed root directory. Defaults to ``os.getcwd()``. + + Returns: + The resolved, validated absolute path. + + Raises: + ValueError: If the path escapes the allowed directory or is not a directory. + """ + validated = validate_file_path(path, base_dir) + if not os.path.isdir(validated): + raise ValueError(f"Path '{validated}' is not a directory.") + return validated + + +# --------------------------------------------------------------------------- +# URL validation +# --------------------------------------------------------------------------- + +# Private and reserved IP ranges that should not be accessed +_BLOCKED_IPV4_NETWORKS = [ + ipaddress.ip_network("10.0.0.0/8"), + ipaddress.ip_network("172.16.0.0/12"), + ipaddress.ip_network("192.168.0.0/16"), + ipaddress.ip_network("127.0.0.0/8"), + ipaddress.ip_network("169.254.0.0/16"), # Link-local / cloud metadata + ipaddress.ip_network("0.0.0.0/32"), +] + +_BLOCKED_IPV6_NETWORKS = [ + ipaddress.ip_network("::1/128"), + ipaddress.ip_network("::/128"), + ipaddress.ip_network("fc00::/7"), # Unique local addresses + ipaddress.ip_network("fe80::/10"), # Link-local IPv6 +] + + +def _is_private_or_reserved(ip_str: str) -> bool: + """Check if an IP address is private, reserved, or otherwise unsafe.""" + try: + addr = ipaddress.ip_address(ip_str) + # Unwrap 
IPv4-mapped IPv6 addresses (e.g., ::ffff:127.0.0.1) to IPv4 + # so they are only checked against IPv4 networks (avoids TypeError when + # an IPv4Address is compared against an IPv6Network). + if isinstance(addr, ipaddress.IPv6Address) and addr.ipv4_mapped: + addr = addr.ipv4_mapped + networks = ( + _BLOCKED_IPV4_NETWORKS + if isinstance(addr, ipaddress.IPv4Address) + else _BLOCKED_IPV6_NETWORKS + ) + return any(addr in network for network in networks) + except ValueError: + return True # If we can't parse, block it + + +def validate_url(url: str) -> str: + """Validate that a URL is safe to fetch. + + Blocks ``file://`` scheme entirely. For ``http``/``https``, resolves + DNS and checks that the target IP is not private or reserved (prevents + SSRF to internal services and cloud metadata endpoints). + + Args: + url: The URL to validate. + + Returns: + The validated URL string. + + Raises: + ValueError: If the URL uses a blocked scheme or resolves to a + private/reserved IP address. + """ + if _is_escape_hatch_enabled(): + logger.warning( + "%s is enabled — skipping URL validation for: %s", + _UNSAFE_PATHS_ENV, + url, + ) + return url + + parsed = urlparse(url) + + # Block file:// scheme + if parsed.scheme == "file": + raise ValueError( + f"file:// URLs are not allowed: '{url}'. " + f"Use a file path instead, or set {_UNSAFE_PATHS_ENV}=true to bypass." + ) + + # Only allow http and https + if parsed.scheme not in ("http", "https"): + raise ValueError( + f"URL scheme '{parsed.scheme}' is not allowed. Only http and https are supported." 
+ ) + + if not parsed.hostname: + raise ValueError(f"URL has no hostname: '{url}'") + + # Resolve DNS and check IPs + try: + addrinfos = socket.getaddrinfo( + parsed.hostname, parsed.port or (443 if parsed.scheme == "https" else 80) + ) + except socket.gaierror as exc: + raise ValueError(f"Could not resolve hostname: '{parsed.hostname}'") from exc + + for _family, _, _, _, sockaddr in addrinfos: + ip_str = str(sockaddr[0]) + if _is_private_or_reserved(ip_str): + raise ValueError( + f"URL '{url}' resolves to private/reserved IP {ip_str}. " + f"Access to internal networks is not allowed. " + f"Set {_UNSAFE_PATHS_ENV}=true to bypass." + ) + + return url diff --git a/lib/crewai-tools/tests/tools/rag/rag_tool_test.py b/lib/crewai-tools/tests/tools/rag/rag_tool_test.py index 48411699e..93896e8b2 100644 --- a/lib/crewai-tools/tests/tools/rag/rag_tool_test.py +++ b/lib/crewai-tools/tests/tools/rag/rag_tool_test.py @@ -3,10 +3,21 @@ from tempfile import TemporaryDirectory from typing import cast from unittest.mock import MagicMock, Mock, patch +import pytest + from crewai_tools.adapters.crewai_rag_adapter import CrewAIRagAdapter from crewai_tools.tools.rag.rag_tool import RagTool +@pytest.fixture(autouse=True) +def allow_tmp_paths(monkeypatch: pytest.MonkeyPatch) -> None: + """Allow absolute paths outside CWD (e.g. /tmp/) for these RagTool tests. + + Path validation is tested separately in test_rag_tool_path_validation.py. 
+ """ + monkeypatch.setenv("CREWAI_TOOLS_ALLOW_UNSAFE_PATHS", "true") + + @patch("crewai_tools.adapters.crewai_rag_adapter.get_rag_client") @patch("crewai_tools.adapters.crewai_rag_adapter.create_client") def test_rag_tool_initialization( diff --git a/lib/crewai-tools/tests/tools/rag/test_rag_tool_add_data_type.py b/lib/crewai-tools/tests/tools/rag/test_rag_tool_add_data_type.py index 853e6ab00..d8304ee0f 100644 --- a/lib/crewai-tools/tests/tools/rag/test_rag_tool_add_data_type.py +++ b/lib/crewai-tools/tests/tools/rag/test_rag_tool_add_data_type.py @@ -10,6 +10,15 @@ from crewai_tools.rag.data_types import DataType from crewai_tools.tools.rag.rag_tool import RagTool +@pytest.fixture(autouse=True) +def allow_tmp_paths(monkeypatch: pytest.MonkeyPatch) -> None: + """Allow absolute paths outside CWD (e.g. /tmp/) for these data-type tests. + + Path validation is tested separately in test_rag_tool_path_validation.py. + """ + monkeypatch.setenv("CREWAI_TOOLS_ALLOW_UNSAFE_PATHS", "true") + + @pytest.fixture def mock_rag_client() -> MagicMock: """Create a mock RAG client for testing.""" diff --git a/lib/crewai-tools/tests/tools/rag/test_rag_tool_path_validation.py b/lib/crewai-tools/tests/tools/rag/test_rag_tool_path_validation.py new file mode 100644 index 000000000..a58cccde3 --- /dev/null +++ b/lib/crewai-tools/tests/tools/rag/test_rag_tool_path_validation.py @@ -0,0 +1,80 @@ +"""Tests for path and URL validation in RagTool.add() — both positional and keyword args.""" + +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +import pytest + +from crewai_tools.tools.rag.rag_tool import RagTool + + +@pytest.fixture() +def mock_rag_client() -> MagicMock: + mock_client = MagicMock() + mock_client.get_or_create_collection = MagicMock(return_value=None) + mock_client.add_documents = MagicMock(return_value=None) + mock_client.search = MagicMock(return_value=[]) + return mock_client + + +@pytest.fixture() +def tool(mock_rag_client: MagicMock) -> 
RagTool: + with ( + patch("crewai_tools.adapters.crewai_rag_adapter.get_rag_client", return_value=mock_rag_client), + patch("crewai_tools.adapters.crewai_rag_adapter.create_client", return_value=mock_rag_client), + ): + return RagTool() + + +# --------------------------------------------------------------------------- +# Positional arg validation (existing behaviour, regression guard) +# --------------------------------------------------------------------------- + +class TestPositionalArgValidation: + def test_blocks_traversal_in_positional_arg(self, tool): + with pytest.raises(ValueError, match="Blocked unsafe"): + tool.add("../../etc/passwd") + + def test_blocks_file_url_in_positional_arg(self, tool): + with pytest.raises(ValueError, match="Blocked unsafe"): + tool.add("file:///etc/passwd") + + +# --------------------------------------------------------------------------- +# Keyword arg validation (the newly fixed gap) +# --------------------------------------------------------------------------- + +class TestKwargPathValidation: + def test_blocks_traversal_via_path_kwarg(self, tool): + with pytest.raises(ValueError, match="Blocked unsafe path"): + tool.add(path="../../etc/passwd") + + def test_blocks_traversal_via_file_path_kwarg(self, tool): + with pytest.raises(ValueError, match="Blocked unsafe file_path"): + tool.add(file_path="/etc/passwd") + + def test_blocks_traversal_via_directory_path_kwarg(self, tool): + with pytest.raises(ValueError, match="Blocked unsafe directory_path"): + tool.add(directory_path="../../sensitive_dir") + + def test_blocks_file_url_via_url_kwarg(self, tool): + with pytest.raises(ValueError, match="Blocked unsafe url"): + tool.add(url="file:///etc/passwd") + + def test_blocks_private_ip_via_url_kwarg(self, tool): + with pytest.raises(ValueError, match="Blocked unsafe url"): + tool.add(url="http://169.254.169.254/latest/meta-data/") + + def test_blocks_private_ip_via_website_kwarg(self, tool): + with pytest.raises(ValueError, 
match="Blocked unsafe website"): + tool.add(website="http://192.168.1.1/") + + def test_blocks_file_url_via_github_url_kwarg(self, tool): + with pytest.raises(ValueError, match="Blocked unsafe github_url"): + tool.add(github_url="file:///etc/passwd") + + def test_blocks_file_url_via_youtube_url_kwarg(self, tool): + with pytest.raises(ValueError, match="Blocked unsafe youtube_url"): + tool.add(youtube_url="file:///etc/passwd") + diff --git a/lib/crewai-tools/tests/tools/test_search_tools.py b/lib/crewai-tools/tests/tools/test_search_tools.py index 52c08633f..533be1ea2 100644 --- a/lib/crewai-tools/tests/tools/test_search_tools.py +++ b/lib/crewai-tools/tests/tools/test_search_tools.py @@ -23,6 +23,15 @@ from crewai_tools.tools.rag.rag_tool import Adapter import pytest +@pytest.fixture(autouse=True) +def allow_tmp_paths(monkeypatch: pytest.MonkeyPatch) -> None: + """Allow absolute paths outside CWD (e.g. /tmp/) for these search-tool tests. + + Path validation is tested separately in test_rag_tool_path_validation.py. 
+ """ + monkeypatch.setenv("CREWAI_TOOLS_ALLOW_UNSAFE_PATHS", "true") + + @pytest.fixture def mock_adapter(): mock_adapter = MagicMock(spec=Adapter) diff --git a/lib/crewai-tools/tests/utilities/__init__.py b/lib/crewai-tools/tests/utilities/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai-tools/tests/utilities/test_safe_path.py b/lib/crewai-tools/tests/utilities/test_safe_path.py new file mode 100644 index 000000000..83e247292 --- /dev/null +++ b/lib/crewai-tools/tests/utilities/test_safe_path.py @@ -0,0 +1,170 @@ +"""Tests for path and URL validation utilities.""" + +from __future__ import annotations + +import os + +import pytest + +from crewai_tools.utilities.safe_path import ( + validate_directory_path, + validate_file_path, + validate_url, +) + + +# --------------------------------------------------------------------------- +# File path validation +# --------------------------------------------------------------------------- + +class TestValidateFilePath: + """Tests for validate_file_path.""" + + def test_valid_relative_path(self, tmp_path): + """Normal relative path within the base directory.""" + (tmp_path / "data.json").touch() + result = validate_file_path("data.json", str(tmp_path)) + assert result == str(tmp_path / "data.json") + + def test_valid_nested_path(self, tmp_path): + """Nested path within base directory.""" + (tmp_path / "sub").mkdir() + (tmp_path / "sub" / "file.txt").touch() + result = validate_file_path("sub/file.txt", str(tmp_path)) + assert result == str(tmp_path / "sub" / "file.txt") + + def test_rejects_dotdot_traversal(self, tmp_path): + """Reject ../ traversal that escapes base_dir.""" + with pytest.raises(ValueError, match="outside the allowed directory"): + validate_file_path("../../etc/passwd", str(tmp_path)) + + def test_rejects_absolute_path_outside_base(self, tmp_path): + """Reject absolute path outside base_dir.""" + with pytest.raises(ValueError, match="outside the allowed directory"): + 
validate_file_path("/etc/passwd", str(tmp_path)) + + def test_allows_absolute_path_inside_base(self, tmp_path): + """Allow absolute path that's inside base_dir.""" + (tmp_path / "ok.txt").touch() + result = validate_file_path(str(tmp_path / "ok.txt"), str(tmp_path)) + assert result == str(tmp_path / "ok.txt") + + def test_rejects_symlink_escape(self, tmp_path): + """Reject symlinks that point outside base_dir.""" + link = tmp_path / "sneaky_link" + # Create a symlink pointing to /etc/passwd + os.symlink("/etc/passwd", str(link)) + with pytest.raises(ValueError, match="outside the allowed directory"): + validate_file_path("sneaky_link", str(tmp_path)) + + def test_defaults_to_cwd(self): + """When no base_dir is given, use cwd.""" + cwd = os.getcwd() + # A file in cwd should be valid + result = validate_file_path(".", None) + assert result == os.path.realpath(cwd) + + def test_escape_hatch(self, tmp_path, monkeypatch): + """CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true bypasses validation.""" + monkeypatch.setenv("CREWAI_TOOLS_ALLOW_UNSAFE_PATHS", "true") + # This would normally be rejected + result = validate_file_path("/etc/passwd", str(tmp_path)) + assert result == os.path.realpath("/etc/passwd") + + +class TestValidateDirectoryPath: + """Tests for validate_directory_path.""" + + def test_valid_directory(self, tmp_path): + (tmp_path / "subdir").mkdir() + result = validate_directory_path("subdir", str(tmp_path)) + assert result == str(tmp_path / "subdir") + + def test_rejects_file_as_directory(self, tmp_path): + (tmp_path / "file.txt").touch() + with pytest.raises(ValueError, match="not a directory"): + validate_directory_path("file.txt", str(tmp_path)) + + def test_rejects_traversal(self, tmp_path): + with pytest.raises(ValueError, match="outside the allowed directory"): + validate_directory_path("../../", str(tmp_path)) + + +# --------------------------------------------------------------------------- +# URL validation +# 
--------------------------------------------------------------------------- + +class TestValidateUrl: + """Tests for validate_url.""" + + def test_valid_https_url(self): + """Normal HTTPS URL should pass.""" + result = validate_url("https://example.com/data.json") + assert result == "https://example.com/data.json" + + def test_valid_http_url(self): + """Normal HTTP URL should pass.""" + result = validate_url("http://example.com/api") + assert result == "http://example.com/api" + + def test_blocks_file_scheme(self): + """file:// URLs must be blocked.""" + with pytest.raises(ValueError, match="file:// URLs are not allowed"): + validate_url("file:///etc/passwd") + + def test_blocks_file_scheme_with_host(self): + with pytest.raises(ValueError, match="file:// URLs are not allowed"): + validate_url("file://localhost/etc/shadow") + + def test_blocks_localhost(self): + """localhost must be blocked (resolves to 127.0.0.1).""" + with pytest.raises(ValueError, match="private/reserved IP"): + validate_url("http://localhost/admin") + + def test_blocks_127_0_0_1(self): + with pytest.raises(ValueError, match="private/reserved IP"): + validate_url("http://127.0.0.1/admin") + + def test_blocks_cloud_metadata(self): + """AWS/GCP/Azure metadata endpoint must be blocked.""" + with pytest.raises(ValueError, match="private/reserved IP"): + validate_url("http://169.254.169.254/latest/meta-data/") + + def test_blocks_private_10_range(self): + with pytest.raises(ValueError, match="private/reserved IP"): + validate_url("http://10.0.0.1/internal") + + def test_blocks_private_172_range(self): + with pytest.raises(ValueError, match="private/reserved IP"): + validate_url("http://172.16.0.1/internal") + + def test_blocks_private_192_range(self): + with pytest.raises(ValueError, match="private/reserved IP"): + validate_url("http://192.168.1.1/router") + + def test_blocks_zero_address(self): + with pytest.raises(ValueError, match="private/reserved IP"): + validate_url("http://0.0.0.0/") + + def 
test_blocks_ipv6_localhost(self): + with pytest.raises(ValueError, match="private/reserved IP"): + validate_url("http://[::1]/admin") + + def test_blocks_ftp_scheme(self): + with pytest.raises(ValueError, match="not allowed"): + validate_url("ftp://example.com/file") + + def test_blocks_empty_hostname(self): + with pytest.raises(ValueError, match="no hostname"): + validate_url("http:///path") + + def test_blocks_unresolvable_host(self): + with pytest.raises(ValueError, match="Could not resolve"): + validate_url("http://this-host-definitely-does-not-exist-abc123.com/") + + def test_escape_hatch(self, monkeypatch): + """CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true bypasses URL validation.""" + monkeypatch.setenv("CREWAI_TOOLS_ALLOW_UNSAFE_PATHS", "true") + # file:// would normally be blocked + result = validate_url("file:///etc/passwd") + assert result == "file:///etc/passwd" diff --git a/lib/crewai/src/crewai/cli/cli.py b/lib/crewai/src/crewai/cli/cli.py index b0483d570..c40fe656f 100644 --- a/lib/crewai/src/crewai/cli/cli.py +++ b/lib/crewai/src/crewai/cli/cli.py @@ -609,7 +609,6 @@ def env() -> None: @env.command("view") def env_view() -> None: """View tracing-related environment variables.""" - import os from pathlib import Path from rich.console import Console @@ -738,7 +737,6 @@ def traces_disable() -> None: @traces.command("status") def traces_status() -> None: """Show current trace collection status.""" - import os from rich.console import Console from rich.panel import Panel diff --git a/lib/crewai/src/crewai/tasks/llm_guardrail.py b/lib/crewai/src/crewai/tasks/llm_guardrail.py index 3cbd20c65..754596ab7 100644 --- a/lib/crewai/src/crewai/tasks/llm_guardrail.py +++ b/lib/crewai/src/crewai/tasks/llm_guardrail.py @@ -1,6 +1,6 @@ import asyncio -import concurrent.futures from collections.abc import Coroutine +import concurrent.futures import contextvars import inspect from typing import Any From 5958a16adeee31d6e88bf719b556283cc6b28cdb Mon Sep 17 00:00:00 2001 From: 
Greyson LaLonde Date: Wed, 8 Apr 2026 01:13:23 +0800 Subject: [PATCH 12/21] refactor: checkpoint API cleanup --- docs/ar/concepts/checkpointing.mdx | 19 ++++----- docs/en/concepts/checkpointing.mdx | 27 ++++++------ docs/ko/concepts/checkpointing.mdx | 19 ++++----- docs/pt-BR/concepts/checkpointing.mdx | 19 ++++----- .../src/crewai/state/checkpoint_config.py | 9 ++-- .../src/crewai/state/checkpoint_listener.py | 24 ++--------- lib/crewai/src/crewai/state/provider/core.py | 21 +++++++--- .../crewai/state/provider/json_provider.py | 32 +++++++++++---- .../crewai/state/provider/sqlite_provider.py | 41 ++++++++----------- lib/crewai/src/crewai/state/runtime.py | 16 ++++---- lib/crewai/tests/test_checkpoint.py | 18 ++++---- 11 files changed, 119 insertions(+), 126 deletions(-) diff --git a/docs/ar/concepts/checkpointing.mdx b/docs/ar/concepts/checkpointing.mdx index 4fa3665dd..578f04be9 100644 --- a/docs/ar/concepts/checkpointing.mdx +++ b/docs/ar/concepts/checkpointing.mdx @@ -39,7 +39,7 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./my_checkpoints", + location="./my_checkpoints", on_events=["task_completed", "crew_kickoff_completed"], max_checkpoints=5, ), @@ -50,7 +50,7 @@ crew = Crew( | الحقل | النوع | الافتراضي | الوصف | |:------|:------|:----------|:------| -| `directory` | `str` | `"./.checkpoints"` | مسار ملفات نقاط الحفظ | +| `location` | `str` | `"./.checkpoints"` | مسار ملفات نقاط الحفظ | | `on_events` | `list[str]` | `["task_completed"]` | انواع الاحداث التي تطلق نقطة حفظ | | `provider` | `BaseProvider` | `JsonProvider()` | واجهة التخزين | | `max_checkpoints` | `int \| None` | `None` | الحد الاقصى للملفات؛ يتم حذف الاقدم اولا | @@ -95,7 +95,7 @@ result = crew.kickoff() # يستأنف من اخر مهمة مكتملة crew = Crew( agents=[researcher, writer], tasks=[research_task, write_task, review_task], - checkpoint=CheckpointConfig(directory="./crew_cp"), + checkpoint=CheckpointConfig(location="./crew_cp"), ) ``` @@ -118,7 +118,7 @@ 
class MyFlow(Flow): flow = MyFlow( checkpoint=CheckpointConfig( - directory="./flow_cp", + location="./flow_cp", on_events=["method_execution_finished"], ), ) @@ -137,7 +137,7 @@ agent = Agent( goal="Research topics", backstory="Expert researcher", checkpoint=CheckpointConfig( - directory="./agent_cp", + location="./agent_cp", on_events=["lite_agent_execution_completed"], ), ) @@ -160,7 +160,7 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./my_checkpoints", + location="./my_checkpoints", provider=JsonProvider(), max_checkpoints=5, ), @@ -179,15 +179,12 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./.checkpoints.db", - provider=SqliteProvider(max_checkpoints=50), + location="./.checkpoints.db", + provider=SqliteProvider(), ), ) ``` - -عند استخدام `SqliteProvider`، حقل `directory` هو مسار ملف قاعدة البيانات، وليس مجلدا. - ## انواع الاحداث diff --git a/docs/en/concepts/checkpointing.mdx b/docs/en/concepts/checkpointing.mdx index dccdf1b1a..21ed13905 100644 --- a/docs/en/concepts/checkpointing.mdx +++ b/docs/en/concepts/checkpointing.mdx @@ -39,7 +39,7 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./my_checkpoints", + location="./my_checkpoints", on_events=["task_completed", "crew_kickoff_completed"], max_checkpoints=5, ), @@ -50,10 +50,10 @@ crew = Crew( | Field | Type | Default | Description | |:------|:-----|:--------|:------------| -| `directory` | `str` | `"./.checkpoints"` | Filesystem path for checkpoint files | +| `location` | `str` | `"./.checkpoints"` | Storage destination — a directory for `JsonProvider`, a database file path for `SqliteProvider` | | `on_events` | `list[str]` | `["task_completed"]` | Event types that trigger a checkpoint | | `provider` | `BaseProvider` | `JsonProvider()` | Storage backend | -| `max_checkpoints` | `int \| None` | `None` | Max files to keep; oldest pruned first | +| `max_checkpoints` | `int \| None` | `None` | 
Max checkpoints to keep. Oldest are pruned after each write. Pruning is handled by the provider. | ### Inheritance and Opt-Out @@ -95,7 +95,7 @@ The restored crew skips already-completed tasks and resumes from the first incom crew = Crew( agents=[researcher, writer], tasks=[research_task, write_task, review_task], - checkpoint=CheckpointConfig(directory="./crew_cp"), + checkpoint=CheckpointConfig(location="./crew_cp"), ) ``` @@ -118,7 +118,7 @@ class MyFlow(Flow): flow = MyFlow( checkpoint=CheckpointConfig( - directory="./flow_cp", + location="./flow_cp", on_events=["method_execution_finished"], ), ) @@ -137,7 +137,7 @@ agent = Agent( goal="Research topics", backstory="Expert researcher", checkpoint=CheckpointConfig( - directory="./agent_cp", + location="./agent_cp", on_events=["lite_agent_execution_completed"], ), ) @@ -160,14 +160,14 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./my_checkpoints", + location="./my_checkpoints", provider=JsonProvider(), # this is the default max_checkpoints=5, # prunes oldest files ), ) ``` -Files are named `_.json` inside the directory. +Files are named `_.json` inside the location directory. ### SqliteProvider @@ -181,17 +181,14 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./.checkpoints.db", - provider=SqliteProvider(max_checkpoints=50), + location="./.checkpoints.db", + provider=SqliteProvider(), + max_checkpoints=50, ), ) ``` -`SqliteProvider` accepts its own `max_checkpoints` parameter that prunes old rows via SQL. WAL journal mode is enabled for concurrent read access. - - -When using `SqliteProvider`, the `directory` field is the database file path, not a directory. The `max_checkpoints` on `CheckpointConfig` controls filesystem pruning (for `JsonProvider`), while `SqliteProvider.max_checkpoints` controls row pruning in the database. - +WAL journal mode is enabled for concurrent read access. 
## Event Types diff --git a/docs/ko/concepts/checkpointing.mdx b/docs/ko/concepts/checkpointing.mdx index a08933faa..643c6d9c1 100644 --- a/docs/ko/concepts/checkpointing.mdx +++ b/docs/ko/concepts/checkpointing.mdx @@ -39,7 +39,7 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./my_checkpoints", + location="./my_checkpoints", on_events=["task_completed", "crew_kickoff_completed"], max_checkpoints=5, ), @@ -50,7 +50,7 @@ crew = Crew( | 필드 | 타입 | 기본값 | 설명 | |:-----|:-----|:-------|:-----| -| `directory` | `str` | `"./.checkpoints"` | 체크포인트 파일 경로 | +| `location` | `str` | `"./.checkpoints"` | 체크포인트 파일 경로 | | `on_events` | `list[str]` | `["task_completed"]` | 체크포인트를 트리거하는 이벤트 타입 | | `provider` | `BaseProvider` | `JsonProvider()` | 스토리지 백엔드 | | `max_checkpoints` | `int \| None` | `None` | 보관할 최대 파일 수; 오래된 것부터 삭제 | @@ -95,7 +95,7 @@ result = crew.kickoff() # 마지막으로 완료된 태스크부터 재개 crew = Crew( agents=[researcher, writer], tasks=[research_task, write_task, review_task], - checkpoint=CheckpointConfig(directory="./crew_cp"), + checkpoint=CheckpointConfig(location="./crew_cp"), ) ``` @@ -118,7 +118,7 @@ class MyFlow(Flow): flow = MyFlow( checkpoint=CheckpointConfig( - directory="./flow_cp", + location="./flow_cp", on_events=["method_execution_finished"], ), ) @@ -137,7 +137,7 @@ agent = Agent( goal="Research topics", backstory="Expert researcher", checkpoint=CheckpointConfig( - directory="./agent_cp", + location="./agent_cp", on_events=["lite_agent_execution_completed"], ), ) @@ -160,7 +160,7 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./my_checkpoints", + location="./my_checkpoints", provider=JsonProvider(), max_checkpoints=5, ), @@ -179,15 +179,12 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./.checkpoints.db", - provider=SqliteProvider(max_checkpoints=50), + location="./.checkpoints.db", + provider=SqliteProvider(), ), ) ``` - -`SqliteProvider`를 사용할 때 
`directory` 필드는 디렉토리가 아닌 데이터베이스 파일 경로입니다. - ## 이벤트 타입 diff --git a/docs/pt-BR/concepts/checkpointing.mdx b/docs/pt-BR/concepts/checkpointing.mdx index 1ef7aedf3..25db59713 100644 --- a/docs/pt-BR/concepts/checkpointing.mdx +++ b/docs/pt-BR/concepts/checkpointing.mdx @@ -39,7 +39,7 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./my_checkpoints", + location="./my_checkpoints", on_events=["task_completed", "crew_kickoff_completed"], max_checkpoints=5, ), @@ -50,7 +50,7 @@ crew = Crew( | Campo | Tipo | Padrao | Descricao | |:------|:-----|:-------|:----------| -| `directory` | `str` | `"./.checkpoints"` | Caminho para os arquivos de checkpoint | +| `location` | `str` | `"./.checkpoints"` | Caminho para os arquivos de checkpoint | | `on_events` | `list[str]` | `["task_completed"]` | Tipos de evento que acionam um checkpoint | | `provider` | `BaseProvider` | `JsonProvider()` | Backend de armazenamento | | `max_checkpoints` | `int \| None` | `None` | Maximo de arquivos a manter; os mais antigos sao removidos primeiro | @@ -95,7 +95,7 @@ A crew restaurada pula tarefas ja concluidas e retoma a partir da primeira incom crew = Crew( agents=[researcher, writer], tasks=[research_task, write_task, review_task], - checkpoint=CheckpointConfig(directory="./crew_cp"), + checkpoint=CheckpointConfig(location="./crew_cp"), ) ``` @@ -118,7 +118,7 @@ class MyFlow(Flow): flow = MyFlow( checkpoint=CheckpointConfig( - directory="./flow_cp", + location="./flow_cp", on_events=["method_execution_finished"], ), ) @@ -137,7 +137,7 @@ agent = Agent( goal="Research topics", backstory="Expert researcher", checkpoint=CheckpointConfig( - directory="./agent_cp", + location="./agent_cp", on_events=["lite_agent_execution_completed"], ), ) @@ -160,7 +160,7 @@ crew = Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./my_checkpoints", + location="./my_checkpoints", provider=JsonProvider(), max_checkpoints=5, ), @@ -179,15 +179,12 @@ crew = 
Crew( agents=[...], tasks=[...], checkpoint=CheckpointConfig( - directory="./.checkpoints.db", - provider=SqliteProvider(max_checkpoints=50), + location="./.checkpoints.db", + provider=SqliteProvider(), ), ) ``` - -Ao usar `SqliteProvider`, o campo `directory` e o caminho do arquivo de banco de dados, nao um diretorio. - ## Tipos de Evento diff --git a/lib/crewai/src/crewai/state/checkpoint_config.py b/lib/crewai/src/crewai/state/checkpoint_config.py index 4c60fd35c..4c5499ff4 100644 --- a/lib/crewai/src/crewai/state/checkpoint_config.py +++ b/lib/crewai/src/crewai/state/checkpoint_config.py @@ -165,9 +165,10 @@ class CheckpointConfig(BaseModel): automatically whenever the specified event(s) fire. """ - directory: str = Field( + location: str = Field( default="./.checkpoints", - description="Filesystem path where checkpoint JSON files are written.", + description="Storage destination. For JsonProvider this is a directory " + "path; for SqliteProvider it is a database file path.", ) on_events: list[CheckpointEventType | Literal["*"]] = Field( default=["task_completed"], @@ -180,8 +181,8 @@ class CheckpointConfig(BaseModel): ) max_checkpoints: int | None = Field( default=None, - description="Maximum checkpoint files to keep. Oldest are pruned first. " - "None means keep all.", + description="Maximum checkpoints to keep. Oldest are pruned after " + "each write. None means keep all.", ) @property diff --git a/lib/crewai/src/crewai/state/checkpoint_listener.py b/lib/crewai/src/crewai/state/checkpoint_listener.py index cf5b39b2b..6471b9bde 100644 --- a/lib/crewai/src/crewai/state/checkpoint_listener.py +++ b/lib/crewai/src/crewai/state/checkpoint_listener.py @@ -7,9 +7,7 @@ avoids per-event overhead when no entity uses checkpointing. 
from __future__ import annotations -import glob import logging -import os import threading from typing import Any @@ -105,29 +103,13 @@ def _find_checkpoint(source: Any) -> CheckpointConfig | None: def _do_checkpoint(state: RuntimeState, cfg: CheckpointConfig) -> None: - """Write a checkpoint synchronously and optionally prune old files.""" + """Write a checkpoint and prune old ones if configured.""" _prepare_entities(state.root) data = state.model_dump_json() - cfg.provider.checkpoint(data, cfg.directory) + cfg.provider.checkpoint(data, cfg.location) if cfg.max_checkpoints is not None: - _prune(cfg.directory, cfg.max_checkpoints) - - -def _safe_remove(path: str) -> None: - try: - os.remove(path) - except OSError: - logger.debug("Failed to remove checkpoint file %s", path, exc_info=True) - - -def _prune(directory: str, max_keep: int) -> None: - """Remove oldest checkpoint files beyond *max_keep*.""" - pattern = os.path.join(directory, "*.json") - files = sorted(glob.glob(pattern), key=os.path.getmtime) - to_remove = files if max_keep == 0 else files[:-max_keep] - for path in to_remove: - _safe_remove(path) + cfg.provider.prune(cfg.location, cfg.max_checkpoints) def _should_checkpoint(source: Any, event: BaseEvent) -> CheckpointConfig | None: diff --git a/lib/crewai/src/crewai/state/provider/core.py b/lib/crewai/src/crewai/state/provider/core.py index ee420eea0..46f079444 100644 --- a/lib/crewai/src/crewai/state/provider/core.py +++ b/lib/crewai/src/crewai/state/provider/core.py @@ -34,27 +34,36 @@ class BaseProvider(Protocol): ), ) - def checkpoint(self, data: str, directory: str) -> str: + def checkpoint(self, data: str, location: str) -> str: """Persist a snapshot synchronously. Args: data: The serialized string to persist. - directory: Logical destination: path, bucket prefix, etc. + location: Storage destination (directory, file path, URI, etc.). Returns: - A location identifier for the saved checkpoint, such as a file path or URI. 
+ A location identifier for the saved checkpoint. """ ... - async def acheckpoint(self, data: str, directory: str) -> str: + async def acheckpoint(self, data: str, location: str) -> str: """Persist a snapshot asynchronously. Args: data: The serialized string to persist. - directory: Logical destination: path, bucket prefix, etc. + location: Storage destination (directory, file path, URI, etc.). Returns: - A location identifier for the saved checkpoint, such as a file path or URI. + A location identifier for the saved checkpoint. + """ + ... + + def prune(self, location: str, max_keep: int) -> None: + """Remove old checkpoints, keeping at most *max_keep*. + + Args: + location: The storage destination passed to ``checkpoint``. + max_keep: Maximum number of checkpoints to retain. """ ... diff --git a/lib/crewai/src/crewai/state/provider/json_provider.py b/lib/crewai/src/crewai/state/provider/json_provider.py index 656e19fe0..d2ac75d9c 100644 --- a/lib/crewai/src/crewai/state/provider/json_provider.py +++ b/lib/crewai/src/crewai/state/provider/json_provider.py @@ -3,6 +3,9 @@ from __future__ import annotations from datetime import datetime, timezone +import glob +import logging +import os from pathlib import Path import uuid @@ -12,43 +15,56 @@ import aiofiles.os from crewai.state.provider.core import BaseProvider +logger = logging.getLogger(__name__) + + class JsonProvider(BaseProvider): """Persists runtime state checkpoints as JSON files on the local filesystem.""" - def checkpoint(self, data: str, directory: str) -> str: - """Write a JSON checkpoint file to the directory. + def checkpoint(self, data: str, location: str) -> str: + """Write a JSON checkpoint file. Args: data: The serialized JSON string to persist. - directory: Filesystem path where the checkpoint will be saved. + location: Directory where the checkpoint will be saved. Returns: The path to the written checkpoint file. 
""" - file_path = _build_path(directory) + file_path = _build_path(location) file_path.parent.mkdir(parents=True, exist_ok=True) with open(file_path, "w") as f: f.write(data) return str(file_path) - async def acheckpoint(self, data: str, directory: str) -> str: - """Write a JSON checkpoint file to the directory asynchronously. + async def acheckpoint(self, data: str, location: str) -> str: + """Write a JSON checkpoint file asynchronously. Args: data: The serialized JSON string to persist. - directory: Filesystem path where the checkpoint will be saved. + location: Directory where the checkpoint will be saved. Returns: The path to the written checkpoint file. """ - file_path = _build_path(directory) + file_path = _build_path(location) await aiofiles.os.makedirs(str(file_path.parent), exist_ok=True) async with aiofiles.open(file_path, "w") as f: await f.write(data) return str(file_path) + def prune(self, location: str, max_keep: int) -> None: + """Remove oldest checkpoint files beyond *max_keep*.""" + pattern = os.path.join(location, "*.json") + files = sorted(glob.glob(pattern), key=os.path.getmtime) + for path in files if max_keep == 0 else files[:-max_keep]: + try: + os.remove(path) + except OSError: # noqa: PERF203 + logger.debug("Failed to remove %s", path, exc_info=True) + def from_checkpoint(self, location: str) -> str: """Read a JSON checkpoint file. diff --git a/lib/crewai/src/crewai/state/provider/sqlite_provider.py b/lib/crewai/src/crewai/state/provider/sqlite_provider.py index 7a1d89399..ae014dda3 100644 --- a/lib/crewai/src/crewai/state/provider/sqlite_provider.py +++ b/lib/crewai/src/crewai/state/provider/sqlite_provider.py @@ -43,58 +43,53 @@ def _make_id() -> tuple[str, str]: class SqliteProvider(BaseProvider): """Persists runtime state checkpoints in a SQLite database. - The ``directory`` argument to ``checkpoint`` / ``acheckpoint`` is - used as the database path (e.g. ``"./.checkpoints.db"``). 
- - Args: - max_checkpoints: Maximum number of checkpoints to retain. - Oldest rows are pruned after each write. None keeps all. + The ``location`` argument to ``checkpoint`` / ``acheckpoint`` is + used as the database file path. """ - def __init__(self, max_checkpoints: int | None = None) -> None: - self.max_checkpoints = max_checkpoints - - def checkpoint(self, data: str, directory: str) -> str: + def checkpoint(self, data: str, location: str) -> str: """Write a checkpoint to the SQLite database. Args: data: The serialized JSON string to persist. - directory: Path to the SQLite database file. + location: Path to the SQLite database file. Returns: A location string in the format ``"db_path#checkpoint_id"``. """ checkpoint_id, ts = _make_id() - Path(directory).parent.mkdir(parents=True, exist_ok=True) - with sqlite3.connect(directory) as conn: + Path(location).parent.mkdir(parents=True, exist_ok=True) + with sqlite3.connect(location) as conn: conn.execute("PRAGMA journal_mode=WAL") conn.execute(_CREATE_TABLE) conn.execute(_INSERT, (checkpoint_id, ts, data)) - if self.max_checkpoints is not None: - conn.execute(_PRUNE, (self.max_checkpoints,)) conn.commit() - return f"{directory}#{checkpoint_id}" + return f"{location}#{checkpoint_id}" - async def acheckpoint(self, data: str, directory: str) -> str: + async def acheckpoint(self, data: str, location: str) -> str: """Write a checkpoint to the SQLite database asynchronously. Args: data: The serialized JSON string to persist. - directory: Path to the SQLite database file. + location: Path to the SQLite database file. Returns: A location string in the format ``"db_path#checkpoint_id"``. 
""" checkpoint_id, ts = _make_id() - Path(directory).parent.mkdir(parents=True, exist_ok=True) - async with aiosqlite.connect(directory) as db: + Path(location).parent.mkdir(parents=True, exist_ok=True) + async with aiosqlite.connect(location) as db: await db.execute("PRAGMA journal_mode=WAL") await db.execute(_CREATE_TABLE) await db.execute(_INSERT, (checkpoint_id, ts, data)) - if self.max_checkpoints is not None: - await db.execute(_PRUNE, (self.max_checkpoints,)) await db.commit() - return f"{directory}#{checkpoint_id}" + return f"{location}#{checkpoint_id}" + + def prune(self, location: str, max_keep: int) -> None: + """Remove oldest checkpoint rows beyond *max_keep*.""" + with sqlite3.connect(location) as conn: + conn.execute(_PRUNE, (max_keep,)) + conn.commit() def from_checkpoint(self, location: str) -> str: """Read a checkpoint from the SQLite database. diff --git a/lib/crewai/src/crewai/state/runtime.py b/lib/crewai/src/crewai/state/runtime.py index a5bb6bd8d..6f1c5de80 100644 --- a/lib/crewai/src/crewai/state/runtime.py +++ b/lib/crewai/src/crewai/state/runtime.py @@ -90,29 +90,31 @@ class RuntimeState(RootModel): # type: ignore[type-arg] return state return handler(data) - def checkpoint(self, directory: str) -> str: - """Write a checkpoint file to the directory. + def checkpoint(self, location: str) -> str: + """Write a checkpoint. Args: - directory: Filesystem path where the checkpoint JSON will be saved. + location: Storage destination. For JsonProvider this is a directory + path; for SqliteProvider it is a database file path. Returns: A location identifier for the saved checkpoint. """ _prepare_entities(self.root) - return self._provider.checkpoint(self.model_dump_json(), directory) + return self._provider.checkpoint(self.model_dump_json(), location) - async def acheckpoint(self, directory: str) -> str: + async def acheckpoint(self, location: str) -> str: """Async version of :meth:`checkpoint`. 
Args: - directory: Filesystem path where the checkpoint JSON will be saved. + location: Storage destination. For JsonProvider this is a directory + path; for SqliteProvider it is a database file path. Returns: A location identifier for the saved checkpoint. """ _prepare_entities(self.root) - return await self._provider.acheckpoint(self.model_dump_json(), directory) + return await self._provider.acheckpoint(self.model_dump_json(), location) @classmethod def from_checkpoint( diff --git a/lib/crewai/tests/test_checkpoint.py b/lib/crewai/tests/test_checkpoint.py index 3533dac85..29dc289b4 100644 --- a/lib/crewai/tests/test_checkpoint.py +++ b/lib/crewai/tests/test_checkpoint.py @@ -17,10 +17,10 @@ from crewai.flow.flow import Flow, start from crewai.state.checkpoint_config import CheckpointConfig from crewai.state.checkpoint_listener import ( _find_checkpoint, - _prune, _resolve, _SENTINEL, ) +from crewai.state.provider.json_provider import JsonProvider from crewai.task import Task @@ -37,10 +37,10 @@ class TestResolve: def test_true_returns_config(self) -> None: result = _resolve(True) assert isinstance(result, CheckpointConfig) - assert result.directory == "./.checkpoints" + assert result.location == "./.checkpoints" def test_config_returns_config(self) -> None: - cfg = CheckpointConfig(directory="/tmp/cp") + cfg = CheckpointConfig(location="/tmp/cp") assert _resolve(cfg) is cfg @@ -77,12 +77,12 @@ class TestFindCheckpoint: def test_agent_config_overrides_crew(self) -> None: a = self._make_agent( - checkpoint=CheckpointConfig(directory="/agent_cp") + checkpoint=CheckpointConfig(location="/agent_cp") ) self._make_crew([a], checkpoint=True) cfg = _find_checkpoint(a) assert isinstance(cfg, CheckpointConfig) - assert cfg.directory == "/agent_cp" + assert cfg.location == "/agent_cp" def test_task_inherits_from_crew(self) -> None: a = self._make_agent() @@ -123,7 +123,7 @@ class TestPrune: # Ensure distinct mtime time.sleep(0.01) - _prune(d, max_keep=2) + 
JsonProvider().prune(d, max_keep=2) remaining = os.listdir(d) assert len(remaining) == 2 assert "cp_3.json" in remaining @@ -135,7 +135,7 @@ class TestPrune: with open(os.path.join(d, f"cp_{i}.json"), "w") as f: f.write("{}") - _prune(d, max_keep=0) + JsonProvider().prune(d, max_keep=0) assert os.listdir(d) == [] def test_prune_more_than_existing(self) -> None: @@ -143,7 +143,7 @@ class TestPrune: with open(os.path.join(d, "cp.json"), "w") as f: f.write("{}") - _prune(d, max_keep=10) + JsonProvider().prune(d, max_keep=10) assert len(os.listdir(d)) == 1 @@ -153,7 +153,7 @@ class TestPrune: class TestCheckpointConfig: def test_defaults(self) -> None: cfg = CheckpointConfig() - assert cfg.directory == "./.checkpoints" + assert cfg.location == "./.checkpoints" assert cfg.on_events == ["task_completed"] assert cfg.max_checkpoints is None assert not cfg.trigger_all From a5df7c798c0db34f3648870db497fe39412a09f4 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Wed, 8 Apr 2026 01:28:25 +0800 Subject: [PATCH 13/21] feat: checkpoint list/info CLI commands --- lib/crewai/src/crewai/cli/checkpoint_cli.py | 329 ++++++++++++++++++++ lib/crewai/src/crewai/cli/cli.py | 23 ++ 2 files changed, 352 insertions(+) create mode 100644 lib/crewai/src/crewai/cli/checkpoint_cli.py diff --git a/lib/crewai/src/crewai/cli/checkpoint_cli.py b/lib/crewai/src/crewai/cli/checkpoint_cli.py new file mode 100644 index 000000000..c61500b20 --- /dev/null +++ b/lib/crewai/src/crewai/cli/checkpoint_cli.py @@ -0,0 +1,329 @@ +"""CLI commands for inspecting checkpoint files.""" + +from __future__ import annotations + +from datetime import datetime +import glob +import json +import os +import sqlite3 +from typing import Any + +import click + + +_SQLITE_MAGIC = b"SQLite format 3\x00" + +_SELECT_ALL = """ +SELECT id, created_at, json(data) +FROM checkpoints +ORDER BY rowid DESC +""" + +_SELECT_ONE = """ +SELECT id, created_at, json(data) +FROM checkpoints +WHERE id = ? 
+""" + +_SELECT_LATEST = """ +SELECT id, created_at, json(data) +FROM checkpoints +ORDER BY rowid DESC +LIMIT 1 +""" + + +def _is_sqlite(path: str) -> bool: + """Check if a file is a SQLite database by reading its magic bytes.""" + if not os.path.isfile(path): + return False + try: + with open(path, "rb") as f: + return f.read(16) == _SQLITE_MAGIC + except OSError: + return False + + +def _parse_checkpoint_json(raw: str, source: str) -> dict[str, Any]: + """Parse checkpoint JSON into metadata dict.""" + data = json.loads(raw) + entities = data.get("entities", []) + nodes = data.get("event_record", {}).get("nodes", {}) + event_count = len(nodes) + + trigger_event = None + if nodes: + last_node = max( + nodes.values(), + key=lambda n: n.get("event", {}).get("emission_sequence") or 0, + ) + trigger_event = last_node.get("event", {}).get("type") + + parsed_entities: list[dict[str, Any]] = [] + for entity in entities: + tasks = entity.get("tasks", []) + completed = sum(1 for t in tasks if t.get("output") is not None) + info: dict[str, Any] = { + "type": entity.get("entity_type", "unknown"), + "name": entity.get("name"), + "id": entity.get("id"), + } + if tasks: + info["tasks_completed"] = completed + info["tasks_total"] = len(tasks) + info["tasks"] = [ + { + "description": t.get("description", ""), + "completed": t.get("output") is not None, + } + for t in tasks + ] + parsed_entities.append(info) + + return { + "source": source, + "event_count": event_count, + "trigger": trigger_event, + "entities": parsed_entities, + } + + +def _format_size(size: int) -> str: + if size < 1024: + return f"{size}B" + if size < 1024 * 1024: + return f"{size / 1024:.1f}KB" + return f"{size / 1024 / 1024:.1f}MB" + + +def _ts_from_name(name: str) -> str | None: + """Extract timestamp from checkpoint ID or filename.""" + stem = os.path.basename(name).split("_")[0].removesuffix(".json") + try: + dt = datetime.strptime(stem, "%Y%m%dT%H%M%S") + except ValueError: + return None + return 
dt.strftime("%Y-%m-%d %H:%M:%S") + + +def _entity_summary(entities: list[dict[str, Any]]) -> str: + parts = [] + for ent in entities: + etype = ent.get("type", "unknown") + ename = ent.get("name", "") + completed = ent.get("tasks_completed") + total = ent.get("tasks_total") + if completed is not None and total is not None: + parts.append(f"{etype}:{ename} [{completed}/{total} tasks]") + else: + parts.append(f"{etype}:{ename}") + return ", ".join(parts) if parts else "empty" + + +# --- JSON directory --- + + +def _list_json(location: str) -> list[dict[str, Any]]: + pattern = os.path.join(location, "*.json") + results = [] + for path in sorted(glob.glob(pattern), key=os.path.getmtime, reverse=True): + name = os.path.basename(path) + try: + with open(path) as f: + raw = f.read() + meta = _parse_checkpoint_json(raw, source=name) + meta["name"] = name + meta["ts"] = _ts_from_name(name) + meta["size"] = os.path.getsize(path) + meta["path"] = path + except Exception: + meta = {"name": name, "ts": None, "size": 0, "entities": [], "source": name} + results.append(meta) + return results + + +def _info_json_latest(location: str) -> dict[str, Any] | None: + pattern = os.path.join(location, "*.json") + files = sorted(glob.glob(pattern), key=os.path.getmtime, reverse=True) + if not files: + return None + path = files[0] + with open(path) as f: + raw = f.read() + meta = _parse_checkpoint_json(raw, source=os.path.basename(path)) + meta["name"] = os.path.basename(path) + meta["ts"] = _ts_from_name(path) + meta["size"] = os.path.getsize(path) + meta["path"] = path + return meta + + +def _info_json_file(path: str) -> dict[str, Any]: + with open(path) as f: + raw = f.read() + meta = _parse_checkpoint_json(raw, source=os.path.basename(path)) + meta["name"] = os.path.basename(path) + meta["ts"] = _ts_from_name(path) + meta["size"] = os.path.getsize(path) + meta["path"] = path + return meta + + +# --- SQLite --- + + +def _list_sqlite(db_path: str) -> list[dict[str, Any]]: + results = [] 
+ with sqlite3.connect(db_path) as conn: + for row in conn.execute(_SELECT_ALL): + checkpoint_id, created_at, raw = row + try: + meta = _parse_checkpoint_json(raw, source=checkpoint_id) + meta["name"] = checkpoint_id + meta["ts"] = _ts_from_name(checkpoint_id) or created_at + except Exception: + meta = { + "name": checkpoint_id, + "ts": created_at, + "entities": [], + "source": checkpoint_id, + } + results.append(meta) + return results + + +def _info_sqlite_latest(db_path: str) -> dict[str, Any] | None: + with sqlite3.connect(db_path) as conn: + row = conn.execute(_SELECT_LATEST).fetchone() + if not row: + return None + checkpoint_id, created_at, raw = row + meta = _parse_checkpoint_json(raw, source=checkpoint_id) + meta["name"] = checkpoint_id + meta["ts"] = _ts_from_name(checkpoint_id) or created_at + meta["db"] = db_path + return meta + + +def _info_sqlite_id(db_path: str, checkpoint_id: str) -> dict[str, Any] | None: + with sqlite3.connect(db_path) as conn: + row = conn.execute(_SELECT_ONE, (checkpoint_id,)).fetchone() + if not row: + return None + cid, created_at, raw = row + meta = _parse_checkpoint_json(raw, source=cid) + meta["name"] = cid + meta["ts"] = _ts_from_name(cid) or created_at + meta["db"] = db_path + return meta + + +# --- Public API --- + + +def list_checkpoints(location: str) -> None: + """List all checkpoints at a location.""" + if _is_sqlite(location): + entries = _list_sqlite(location) + label = f"SQLite: {location}" + elif os.path.isdir(location): + entries = _list_json(location) + label = location + else: + click.echo(f"Not a directory or SQLite database: {location}") + return + + if not entries: + click.echo(f"No checkpoints found in {label}") + return + + click.echo(f"Found {len(entries)} checkpoint(s) in {label}\n") + + for entry in entries: + ts = entry.get("ts") or "unknown" + name = entry.get("name", "") + size = _format_size(entry["size"]) if "size" in entry else "" + trigger = entry.get("trigger") or "" + summary = 
_entity_summary(entry.get("entities", [])) + parts = [name, ts] + if size: + parts.append(size) + if trigger: + parts.append(trigger) + parts.append(summary) + click.echo(f" {' '.join(parts)}") + + +def info_checkpoint(path: str) -> None: + """Show details of a single checkpoint.""" + meta: dict[str, Any] | None = None + + # db_path#checkpoint_id format + if "#" in path: + db_path, checkpoint_id = path.rsplit("#", 1) + if _is_sqlite(db_path): + meta = _info_sqlite_id(db_path, checkpoint_id) + if not meta: + click.echo(f"Checkpoint not found: {checkpoint_id}") + return + + # SQLite file — show latest + if meta is None and _is_sqlite(path): + meta = _info_sqlite_latest(path) + if not meta: + click.echo(f"No checkpoints in database: {path}") + return + click.echo(f"Latest checkpoint: {meta['name']}\n") + + # Directory — show latest JSON + if meta is None and os.path.isdir(path): + meta = _info_json_latest(path) + if not meta: + click.echo(f"No checkpoints found in {path}") + return + click.echo(f"Latest checkpoint: {meta['name']}\n") + + # Specific JSON file + if meta is None and os.path.isfile(path): + try: + meta = _info_json_file(path) + except Exception as exc: + click.echo(f"Failed to read checkpoint: {exc}") + return + + if meta is None: + click.echo(f"Not found: {path}") + return + + _print_info(meta) + + +def _print_info(meta: dict[str, Any]) -> None: + ts = meta.get("ts") or "unknown" + source = meta.get("path") or meta.get("db") or meta.get("source", "") + click.echo(f"Source: {source}") + click.echo(f"Name: {meta.get('name', '')}") + click.echo(f"Time: {ts}") + if "size" in meta: + click.echo(f"Size: {_format_size(meta['size'])}") + click.echo(f"Events: {meta.get('event_count', 0)}") + trigger = meta.get("trigger") + if trigger: + click.echo(f"Trigger: {trigger}") + + for ent in meta.get("entities", []): + eid = str(ent.get("id", ""))[:8] + click.echo(f"\n {ent['type']}: {ent.get('name', 'unnamed')} ({eid}...)") + + tasks = ent.get("tasks") + if 
isinstance(tasks, list): + click.echo( + f" Tasks: {ent['tasks_completed']}/{ent['tasks_total']} completed" + ) + for i, task in enumerate(tasks): + status = "done" if task.get("completed") else "pending" + desc = str(task.get("description", "")) + if len(desc) > 70: + desc = desc[:67] + "..." + click.echo(f" {i + 1}. [{status}] {desc}") diff --git a/lib/crewai/src/crewai/cli/cli.py b/lib/crewai/src/crewai/cli/cli.py index c40fe656f..57ff4551a 100644 --- a/lib/crewai/src/crewai/cli/cli.py +++ b/lib/crewai/src/crewai/cli/cli.py @@ -786,5 +786,28 @@ def traces_status() -> None: console.print(panel) +@crewai.group() +def checkpoint() -> None: + """Inspect checkpoint files.""" + + +@checkpoint.command("list") +@click.argument("location", default="./.checkpoints") +def checkpoint_list(location: str) -> None: + """List checkpoints in a directory.""" + from crewai.cli.checkpoint_cli import list_checkpoints + + list_checkpoints(location) + + +@checkpoint.command("info") +@click.argument("path", default="./.checkpoints") +def checkpoint_info(path: str) -> None: + """Show details of a checkpoint. Pass a file or directory for latest.""" + from crewai.cli.checkpoint_cli import info_checkpoint + + info_checkpoint(path) + + if __name__ == "__main__": crewai() From 868416bfe011e142a8cdf25cae9652306eeb8fa6 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Wed, 8 Apr 2026 01:44:50 +0800 Subject: [PATCH 14/21] fix: add SSRF and path traversal protections (#5315) * fix: add SSRF and path traversal protections CVE-2026-2286: validate_url blocks non-http/https schemes, private IPs, loopback, link-local, reserved addresses. Applied to 11 web tools. CVE-2026-2285: validate_path confines file access to the working directory. Applied to 7 file and directory tools. * fix: drop unused assignment from validate_url call * fix: DNS rebinding protection and allow_private flag Rewrite validated URLs to use the resolved IP, preventing DNS rebinding between validation and request time. 
SDK-based tools use pin_ip=False since they manage their own HTTP clients. Add allow_private flag for deployments that need internal network access. * fix: unify security utilities and restore RAG chokepoint validation Co-Authored-By: Claude Sonnet 4.6 * refactor: move validation to security/ package + address review comments - Move safe_path.py to crewai_tools/security/; add safe_url.py re-export - Keep utilities/safe_path.py as a backwards-compat shim - Update all 21 import sites to use crewai_tools.security.safe_path - files_compressor_tool: validate output_path (user-controlled) - serper_scrape_website_tool: call validate_url() before building payload - brightdata_unlocker: validate_url() already called without assignment (no-op fix) Co-Authored-By: Claude Sonnet 4.6 * refactor: move validation to security/ package, keep utilities/ as compat shim - security/safe_path.py is the canonical location for all validation - utilities/safe_path.py re-exports for backward compatibility - All tool imports already point to security.safe_path - All review comments already addressed in prior commits * fix: move validation outside try/except blocks, use correct directory validator Co-Authored-By: Claude Sonnet 4.6 * fix: use resolved paths from validation to prevent symlink TOCTOU, remove unused safe_url.py --------- Co-authored-by: Alex Co-authored-by: Claude Sonnet 4.6 --- .../src/crewai_tools/security/__init__.py | 0 .../src/crewai_tools/security/safe_path.py | 205 +++++++++++++++++ .../brightdata_tool/brightdata_unlocker.py | 3 + .../contextual_create_agent_tool.py | 5 +- .../contextual_parse_tool.py | 3 + .../directory_read_tool.py | 3 + .../directory_search_tool.py | 4 +- .../tools/file_read_tool/file_read_tool.py | 3 + .../files_compressor_tool.py | 5 + .../firecrawl_crawl_website_tool.py | 3 + .../firecrawl_scrape_website_tool.py | 3 + .../hyperbrowser_load_tool.py | 3 + .../jina_scrape_website_tool.py | 3 + .../crewai_tools/tools/ocr_tool/ocr_tool.py | 3 + 
.../src/crewai_tools/tools/rag/rag_tool.py | 25 ++- .../scrape_element_from_website.py | 3 + .../scrape_website_tool.py | 3 + .../scrapfly_scrape_website_tool.py | 3 + .../serper_scrape_website_tool.py | 3 + .../serply_webpage_to_markdown_tool.py | 2 + .../tools/vision_tool/vision_tool.py | 3 + .../website_search/website_search_tool.py | 2 + .../src/crewai_tools/utilities/safe_path.py | 209 +----------------- .../tests/utilities/test_safe_path.py | 2 +- 24 files changed, 288 insertions(+), 213 deletions(-) create mode 100644 lib/crewai-tools/src/crewai_tools/security/__init__.py create mode 100644 lib/crewai-tools/src/crewai_tools/security/safe_path.py diff --git a/lib/crewai-tools/src/crewai_tools/security/__init__.py b/lib/crewai-tools/src/crewai_tools/security/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai-tools/src/crewai_tools/security/safe_path.py b/lib/crewai-tools/src/crewai_tools/security/safe_path.py new file mode 100644 index 000000000..4dde68e12 --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/security/safe_path.py @@ -0,0 +1,205 @@ +"""Path and URL validation utilities for crewai-tools. + +Provides validation for file paths and URLs to prevent unauthorized +file access and server-side request forgery (SSRF) when tools accept +user-controlled or LLM-controlled inputs at runtime. + +Set CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true to bypass validation (not +recommended for production). 
+""" + +from __future__ import annotations + +import ipaddress +import logging +import os +import socket +from urllib.parse import urlparse + + +logger = logging.getLogger(__name__) + +_UNSAFE_PATHS_ENV = "CREWAI_TOOLS_ALLOW_UNSAFE_PATHS" + + +def _is_escape_hatch_enabled() -> bool: + """Check if the unsafe paths escape hatch is enabled.""" + return os.environ.get(_UNSAFE_PATHS_ENV, "").lower() in ("true", "1", "yes") + + +# --------------------------------------------------------------------------- +# File path validation +# --------------------------------------------------------------------------- + + +def validate_file_path(path: str, base_dir: str | None = None) -> str: + """Validate that a file path is safe to read. + + Resolves symlinks and ``..`` components, then checks that the resolved + path falls within *base_dir* (defaults to the current working directory). + + Args: + path: The file path to validate. + base_dir: Allowed root directory. Defaults to ``os.getcwd()``. + + Returns: + The resolved, validated absolute path. + + Raises: + ValueError: If the path escapes the allowed directory. + """ + if _is_escape_hatch_enabled(): + logger.warning( + "%s is enabled — skipping file path validation for: %s", + _UNSAFE_PATHS_ENV, + path, + ) + return os.path.realpath(path) + + if base_dir is None: + base_dir = os.getcwd() + + resolved_base = os.path.realpath(base_dir) + resolved_path = os.path.realpath( + os.path.join(resolved_base, path) if not os.path.isabs(path) else path + ) + + # Ensure the resolved path is within the base directory. + # When resolved_base already ends with a separator (e.g. the filesystem + # root "/"), appending os.sep would double it ("//"), so use the base + # as-is in that case. 
+ prefix = resolved_base if resolved_base.endswith(os.sep) else resolved_base + os.sep + if not resolved_path.startswith(prefix) and resolved_path != resolved_base: + raise ValueError( + f"Path '{path}' resolves to '{resolved_path}' which is outside " + f"the allowed directory '{resolved_base}'. " + f"Set {_UNSAFE_PATHS_ENV}=true to bypass this check." + ) + + return resolved_path + + +def validate_directory_path(path: str, base_dir: str | None = None) -> str: + """Validate that a directory path is safe to read. + + Same as :func:`validate_file_path` but also checks that the path + is an existing directory. + + Args: + path: The directory path to validate. + base_dir: Allowed root directory. Defaults to ``os.getcwd()``. + + Returns: + The resolved, validated absolute path. + + Raises: + ValueError: If the path escapes the allowed directory or is not a directory. + """ + validated = validate_file_path(path, base_dir) + if not os.path.isdir(validated): + raise ValueError(f"Path '{validated}' is not a directory.") + return validated + + +# --------------------------------------------------------------------------- +# URL validation +# --------------------------------------------------------------------------- + +# Private and reserved IP ranges that should not be accessed +_BLOCKED_IPV4_NETWORKS = [ + ipaddress.ip_network("10.0.0.0/8"), + ipaddress.ip_network("172.16.0.0/12"), + ipaddress.ip_network("192.168.0.0/16"), + ipaddress.ip_network("127.0.0.0/8"), + ipaddress.ip_network("169.254.0.0/16"), # Link-local / cloud metadata + ipaddress.ip_network("0.0.0.0/32"), +] + +_BLOCKED_IPV6_NETWORKS = [ + ipaddress.ip_network("::1/128"), + ipaddress.ip_network("::/128"), + ipaddress.ip_network("fc00::/7"), # Unique local addresses + ipaddress.ip_network("fe80::/10"), # Link-local IPv6 +] + + +def _is_private_or_reserved(ip_str: str) -> bool: + """Check if an IP address is private, reserved, or otherwise unsafe.""" + try: + addr = ipaddress.ip_address(ip_str) + # Unwrap 
IPv4-mapped IPv6 addresses (e.g., ::ffff:127.0.0.1) to IPv4 + # so they are only checked against IPv4 networks (avoids TypeError when + # an IPv4Address is compared against an IPv6Network). + if isinstance(addr, ipaddress.IPv6Address) and addr.ipv4_mapped: + addr = addr.ipv4_mapped + networks = ( + _BLOCKED_IPV4_NETWORKS + if isinstance(addr, ipaddress.IPv4Address) + else _BLOCKED_IPV6_NETWORKS + ) + return any(addr in network for network in networks) + except ValueError: + return True # If we can't parse, block it + + +def validate_url(url: str) -> str: + """Validate that a URL is safe to fetch. + + Blocks ``file://`` scheme entirely. For ``http``/``https``, resolves + DNS and checks that the target IP is not private or reserved (prevents + SSRF to internal services and cloud metadata endpoints). + + Args: + url: The URL to validate. + + Returns: + The validated URL string. + + Raises: + ValueError: If the URL uses a blocked scheme or resolves to a + private/reserved IP address. + """ + if _is_escape_hatch_enabled(): + logger.warning( + "%s is enabled — skipping URL validation for: %s", + _UNSAFE_PATHS_ENV, + url, + ) + return url + + parsed = urlparse(url) + + # Block file:// scheme + if parsed.scheme == "file": + raise ValueError( + f"file:// URLs are not allowed: '{url}'. " + f"Use a file path instead, or set {_UNSAFE_PATHS_ENV}=true to bypass." + ) + + # Only allow http and https + if parsed.scheme not in ("http", "https"): + raise ValueError( + f"URL scheme '{parsed.scheme}' is not allowed. Only http and https are supported." 
+ ) + + if not parsed.hostname: + raise ValueError(f"URL has no hostname: '{url}'") + + # Resolve DNS and check IPs + try: + addrinfos = socket.getaddrinfo( + parsed.hostname, parsed.port or (443 if parsed.scheme == "https" else 80) + ) + except socket.gaierror as exc: + raise ValueError(f"Could not resolve hostname: '{parsed.hostname}'") from exc + + for _family, _, _, _, sockaddr in addrinfos: + ip_str = str(sockaddr[0]) + if _is_private_or_reserved(ip_str): + raise ValueError( + f"URL '{url}' resolves to private/reserved IP {ip_str}. " + f"Access to internal networks is not allowed. " + f"Set {_UNSAFE_PATHS_ENV}=true to bypass." + ) + + return url diff --git a/lib/crewai-tools/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py b/lib/crewai-tools/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py index ee1716d0b..c549b1220 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py +++ b/lib/crewai-tools/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py @@ -7,6 +7,8 @@ from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field import requests +from crewai_tools.security.safe_path import validate_url + class BrightDataConfig(BaseModel): API_URL: str = "https://api.brightdata.com/request" @@ -134,6 +136,7 @@ class BrightDataWebUnlockerTool(BaseTool): "Content-Type": "application/json", } + validate_url(url) try: response = requests.post( self.base_url, json=payload, headers=headers, timeout=30 diff --git a/lib/crewai-tools/src/crewai_tools/tools/contextualai_create_agent_tool/contextual_create_agent_tool.py b/lib/crewai-tools/src/crewai_tools/tools/contextualai_create_agent_tool/contextual_create_agent_tool.py index 8896e8261..59bc0d443 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/contextualai_create_agent_tool/contextual_create_agent_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/contextualai_create_agent_tool/contextual_create_agent_tool.py @@ -3,6 +3,8 @@ from 
typing import Any from crewai.tools import BaseTool from pydantic import BaseModel, Field +from crewai_tools.security.safe_path import validate_file_path + class ContextualAICreateAgentSchema(BaseModel): """Schema for contextual create agent tool.""" @@ -47,6 +49,7 @@ class ContextualAICreateAgentTool(BaseTool): document_paths: list[str], ) -> str: """Create a complete RAG pipeline with documents.""" + resolved_paths = [validate_file_path(doc_path) for doc_path in document_paths] try: import os @@ -56,7 +59,7 @@ class ContextualAICreateAgentTool(BaseTool): # Upload documents document_ids = [] - for doc_path in document_paths: + for doc_path in resolved_paths: if not os.path.exists(doc_path): raise FileNotFoundError(f"Document not found: {doc_path}") diff --git a/lib/crewai-tools/src/crewai_tools/tools/contextualai_parse_tool/contextual_parse_tool.py b/lib/crewai-tools/src/crewai_tools/tools/contextualai_parse_tool/contextual_parse_tool.py index 1a0317172..99ef71514 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/contextualai_parse_tool/contextual_parse_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/contextualai_parse_tool/contextual_parse_tool.py @@ -1,6 +1,8 @@ from crewai.tools import BaseTool from pydantic import BaseModel, Field +from crewai_tools.security.safe_path import validate_file_path + class ContextualAIParseSchema(BaseModel): """Schema for contextual parse tool.""" @@ -45,6 +47,7 @@ class ContextualAIParseTool(BaseTool): """Parse a document using Contextual AI's parser.""" if output_types is None: output_types = ["markdown-per-page"] + file_path = validate_file_path(file_path) try: import json import os diff --git a/lib/crewai-tools/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py b/lib/crewai-tools/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py index f65b1b82d..cd5b31bcc 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py +++ 
b/lib/crewai-tools/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py @@ -4,6 +4,8 @@ from typing import Any from crewai.tools import BaseTool from pydantic import BaseModel, Field +from crewai_tools.security.safe_path import validate_directory_path + class FixedDirectoryReadToolSchema(BaseModel): """Input for DirectoryReadTool.""" @@ -39,6 +41,7 @@ class DirectoryReadTool(BaseTool): if directory is None: raise ValueError("Directory must be provided.") + directory = validate_directory_path(directory) if directory[-1] == "/": directory = directory[:-1] files_list = [ diff --git a/lib/crewai-tools/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/lib/crewai-tools/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index f17c4699a..3f6f278ae 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -3,8 +3,8 @@ from typing import Any from pydantic import BaseModel, Field from crewai_tools.rag.data_types import DataType +from crewai_tools.security.safe_path import validate_directory_path from crewai_tools.tools.rag.rag_tool import RagTool -from crewai_tools.utilities.safe_path import validate_directory_path class FixedDirectorySearchToolSchema(BaseModel): @@ -38,7 +38,7 @@ class DirectorySearchTool(RagTool): self._generate_description() def add(self, directory: str) -> None: # type: ignore[override] - validate_directory_path(directory) + directory = validate_directory_path(directory) super().add(directory, data_type=DataType.DIRECTORY) def _run( # type: ignore[override] diff --git a/lib/crewai-tools/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/lib/crewai-tools/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 2c56a70cd..428d19d7d 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ 
b/lib/crewai-tools/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -3,6 +3,8 @@ from typing import Any from crewai.tools import BaseTool from pydantic import BaseModel, Field +from crewai_tools.security.safe_path import validate_file_path + class FileReadToolSchema(BaseModel): """Input for FileReadTool.""" @@ -76,6 +78,7 @@ class FileReadTool(BaseTool): if file_path is None: return "Error: No file path provided. Please provide a file path either in the constructor or as an argument." + file_path = validate_file_path(file_path) try: with open(file_path, "r") as file: if start_line == 1 and line_count is None: diff --git a/lib/crewai-tools/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool.py b/lib/crewai-tools/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool.py index 15861d987..8a759263a 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool.py @@ -5,6 +5,8 @@ import zipfile from crewai.tools import BaseTool from pydantic import BaseModel, Field +from crewai_tools.security.safe_path import validate_file_path + class FileCompressorToolInput(BaseModel): """Input schema for FileCompressorTool.""" @@ -40,12 +42,15 @@ class FileCompressorTool(BaseTool): overwrite: bool = False, format: str = "zip", ) -> str: + input_path = validate_file_path(input_path) if not os.path.exists(input_path): return f"Input path '{input_path}' does not exist." 
if not output_path: output_path = self._generate_output_path(input_path, format) + output_path = validate_file_path(output_path) + format_extension = { "zip": ".zip", "tar": ".tar", diff --git a/lib/crewai-tools/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/lib/crewai-tools/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index cce84c522..47e98135c 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -5,6 +5,8 @@ from typing import Any from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, ConfigDict, Field, PrivateAttr +from crewai_tools.security.safe_path import validate_url + try: from firecrawl import FirecrawlApp # type: ignore[import-untyped] @@ -106,6 +108,7 @@ class FirecrawlCrawlWebsiteTool(BaseTool): if not self._firecrawl: raise RuntimeError("FirecrawlApp not properly initialized") + url = validate_url(url) return self._firecrawl.crawl(url=url, poll_interval=2, **self.config) diff --git a/lib/crewai-tools/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/lib/crewai-tools/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 684cc9617..35b002961 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -5,6 +5,8 @@ from typing import Any from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, ConfigDict, Field, PrivateAttr +from crewai_tools.security.safe_path import validate_url + try: from firecrawl import FirecrawlApp # type: ignore[import-untyped] @@ -106,6 +108,7 @@ class FirecrawlScrapeWebsiteTool(BaseTool): if not 
self._firecrawl: raise RuntimeError("FirecrawlApp not properly initialized") + url = validate_url(url) return self._firecrawl.scrape(url=url, **self.config) diff --git a/lib/crewai-tools/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py b/lib/crewai-tools/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py index 4cf52adab..50a752d19 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py @@ -4,6 +4,8 @@ from typing import Any, Literal from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field +from crewai_tools.security.safe_path import validate_url + class HyperbrowserLoadToolSchema(BaseModel): url: str = Field(description="Website URL") @@ -119,6 +121,7 @@ class HyperbrowserLoadTool(BaseTool): ) from e params = self._prepare_params(params) + url = validate_url(url) if operation == "scrape": scrape_params = StartScrapeJobParams(url=url, **params) diff --git a/lib/crewai-tools/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py b/lib/crewai-tools/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py index 229df0f8c..6762b60e8 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py @@ -4,6 +4,8 @@ from crewai.tools import BaseTool from pydantic import BaseModel, Field import requests +from crewai_tools.security.safe_path import validate_url + class JinaScrapeWebsiteToolInput(BaseModel): """Input schema for JinaScrapeWebsiteTool.""" @@ -45,6 +47,7 @@ class JinaScrapeWebsiteTool(BaseTool): "Website URL must be provided either during initialization or execution" ) + url = validate_url(url) response = requests.get( f"https://r.jina.ai/{url}", headers=self.headers, timeout=15 ) 
diff --git a/lib/crewai-tools/src/crewai_tools/tools/ocr_tool/ocr_tool.py b/lib/crewai-tools/src/crewai_tools/tools/ocr_tool/ocr_tool.py index 89ae45fb6..9a2106233 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/ocr_tool/ocr_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/ocr_tool/ocr_tool.py @@ -11,6 +11,8 @@ from crewai.tools.base_tool import BaseTool from crewai.utilities.types import LLMMessage from pydantic import BaseModel, Field +from crewai_tools.security.safe_path import validate_file_path + class OCRToolSchema(BaseModel): """Input schema for Optical Character Recognition Tool. @@ -98,5 +100,6 @@ class OCRTool(BaseTool): Returns: str: Base64-encoded image data as a UTF-8 string. """ + image_path = validate_file_path(image_path) with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()).decode() diff --git a/lib/crewai-tools/src/crewai_tools/tools/rag/rag_tool.py b/lib/crewai-tools/src/crewai_tools/tools/rag/rag_tool.py index eb7e9cefd..8099443e2 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/rag/rag_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/rag/rag_tool.py @@ -251,7 +251,7 @@ class RagTool(BaseTool): # unauthorized file reads and SSRF. 
from urllib.parse import urlparse - from crewai_tools.utilities.safe_path import validate_file_path, validate_url + from crewai_tools.security.safe_path import validate_file_path, validate_url def _check_url(value: str, label: str) -> None: try: @@ -259,9 +259,9 @@ class RagTool(BaseTool): except ValueError as e: raise ValueError(f"Blocked unsafe {label}: {e}") from e - def _check_path(value: str, label: str) -> None: + def _check_path(value: str, label: str) -> str: try: - validate_file_path(value) + return validate_file_path(value) except ValueError as e: raise ValueError(f"Blocked unsafe {label}: {e}") from e @@ -298,21 +298,32 @@ class RagTool(BaseTool): or os.path.isabs(source_ref) ): try: - validate_file_path(source_ref) + resolved_ref = validate_file_path(source_ref) except ValueError as e: raise ValueError(f"Blocked unsafe file path: {e}") from e + # Use the resolved path to prevent symlink TOCTOU + if isinstance(arg, dict): + arg = {**arg} + if "source" in arg: + arg["source"] = resolved_ref + elif "content" in arg: + arg["content"] = resolved_ref + else: + arg = resolved_ref validated_args.append(arg) # Validate keyword path/URL arguments — these are equally user-controlled # and must not bypass the checks applied to positional args. 
if "path" in kwargs and kwargs.get("path") is not None: - _check_path(str(kwargs["path"]), "path") + kwargs["path"] = _check_path(str(kwargs["path"]), "path") if "file_path" in kwargs and kwargs.get("file_path") is not None: - _check_path(str(kwargs["file_path"]), "file_path") + kwargs["file_path"] = _check_path(str(kwargs["file_path"]), "file_path") if "directory_path" in kwargs and kwargs.get("directory_path") is not None: - _check_path(str(kwargs["directory_path"]), "directory_path") + kwargs["directory_path"] = _check_path( + str(kwargs["directory_path"]), "directory_path" + ) if "url" in kwargs and kwargs.get("url") is not None: _check_url(str(kwargs["url"]), "url") diff --git a/lib/crewai-tools/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py b/lib/crewai-tools/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py index fc7b69a7c..7bba12b72 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py +++ b/lib/crewai-tools/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py @@ -5,6 +5,8 @@ from crewai.tools import BaseTool from pydantic import BaseModel, Field import requests +from crewai_tools.security.safe_path import validate_url + try: from bs4 import BeautifulSoup @@ -81,6 +83,7 @@ class ScrapeElementFromWebsiteTool(BaseTool): if website_url is None or css_element is None: raise ValueError("Both website_url and css_element must be provided.") + website_url = validate_url(website_url) page = requests.get( website_url, headers=self.headers, diff --git a/lib/crewai-tools/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/lib/crewai-tools/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py index 375fcb6b4..d297dfe08 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py +++ 
b/lib/crewai-tools/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py @@ -5,6 +5,8 @@ from typing import Any from pydantic import Field import requests +from crewai_tools.security.safe_path import validate_url + try: from bs4 import BeautifulSoup @@ -73,6 +75,7 @@ class ScrapeWebsiteTool(BaseTool): if website_url is None: raise ValueError("Website URL must be provided.") + website_url = validate_url(website_url) page = requests.get( website_url, timeout=15, diff --git a/lib/crewai-tools/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py b/lib/crewai-tools/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py index 3c96d31af..932b8dc7a 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py @@ -5,6 +5,8 @@ from typing import Any, Literal from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field +from crewai_tools.security.safe_path import validate_url + logger = logging.getLogger(__file__) @@ -72,6 +74,7 @@ class ScrapflyScrapeWebsiteTool(BaseTool): ) -> str | None: from scrapfly import ScrapeConfig + url = validate_url(url) scrape_config = scrape_config if scrape_config is not None else {} try: response = self.scrapfly.scrape( # type: ignore[union-attr] diff --git a/lib/crewai-tools/src/crewai_tools/tools/serper_scrape_website_tool/serper_scrape_website_tool.py b/lib/crewai-tools/src/crewai_tools/tools/serper_scrape_website_tool/serper_scrape_website_tool.py index e0e4080b4..55521104b 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/serper_scrape_website_tool/serper_scrape_website_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/serper_scrape_website_tool/serper_scrape_website_tool.py @@ -5,6 +5,8 @@ from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field import requests 
+from crewai_tools.security.safe_path import validate_url + class SerperScrapeWebsiteInput(BaseModel): """Input schema for SerperScrapeWebsite.""" @@ -42,6 +44,7 @@ class SerperScrapeWebsiteTool(BaseTool): Returns: Scraped website content as a string """ + validate_url(url) try: # Serper API endpoint api_url = "https://scrape.serper.dev" diff --git a/lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py b/lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py index f3a4729f2..4ace8b46a 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py @@ -5,6 +5,7 @@ from crewai.tools import EnvVar from pydantic import BaseModel, Field import requests +from crewai_tools.security.safe_path import validate_url from crewai_tools.tools.rag.rag_tool import RagTool @@ -48,6 +49,7 @@ class SerplyWebpageToMarkdownTool(RagTool): if self.proxy_location and not self.headers.get("X-Proxy-Location"): self.headers["X-Proxy-Location"] = self.proxy_location + validate_url(url) data = {"url": url, "method": "GET", "response_type": "markdown"} response = requests.request( "POST", diff --git a/lib/crewai-tools/src/crewai_tools/tools/vision_tool/vision_tool.py b/lib/crewai-tools/src/crewai_tools/tools/vision_tool/vision_tool.py index 1fa75c688..24904c0f6 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/vision_tool/vision_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/vision_tool/vision_tool.py @@ -7,6 +7,8 @@ from crewai.tools import BaseTool, EnvVar from crewai.utilities.types import LLMMessage from pydantic import BaseModel, Field, PrivateAttr, field_validator +from crewai_tools.security.safe_path import validate_file_path + class ImagePromptSchema(BaseModel): """Input for Vision Tool.""" @@ -135,5 +137,6 @@ class VisionTool(BaseTool): Returns: Base64-encoded image 
data """ + image_path = validate_file_path(image_path) with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()).decode() diff --git a/lib/crewai-tools/src/crewai_tools/tools/website_search/website_search_tool.py b/lib/crewai-tools/src/crewai_tools/tools/website_search/website_search_tool.py index 323557779..62a6c1d70 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/website_search/website_search_tool.py @@ -3,6 +3,7 @@ from typing import Any from pydantic import BaseModel, Field from crewai_tools.rag.data_types import DataType +from crewai_tools.security.safe_path import validate_url from crewai_tools.tools.rag.rag_tool import RagTool @@ -37,6 +38,7 @@ class WebsiteSearchTool(RagTool): self._generate_description() def add(self, website: str) -> None: # type: ignore[override] + website = validate_url(website) super().add(website, data_type=DataType.WEBSITE) def _run( # type: ignore[override] diff --git a/lib/crewai-tools/src/crewai_tools/utilities/safe_path.py b/lib/crewai-tools/src/crewai_tools/utilities/safe_path.py index 4dde68e12..f3ec120fd 100644 --- a/lib/crewai-tools/src/crewai_tools/utilities/safe_path.py +++ b/lib/crewai-tools/src/crewai_tools/utilities/safe_path.py @@ -1,205 +1,10 @@ -"""Path and URL validation utilities for crewai-tools. +"""Backward-compatible re-export from crewai_tools.security.safe_path.""" -Provides validation for file paths and URLs to prevent unauthorized -file access and server-side request forgery (SSRF) when tools accept -user-controlled or LLM-controlled inputs at runtime. - -Set CREWAI_TOOLS_ALLOW_UNSAFE_PATHS=true to bypass validation (not -recommended for production). 
-""" - -from __future__ import annotations - -import ipaddress -import logging -import os -import socket -from urllib.parse import urlparse +from crewai_tools.security.safe_path import ( + validate_directory_path, + validate_file_path, + validate_url, +) -logger = logging.getLogger(__name__) - -_UNSAFE_PATHS_ENV = "CREWAI_TOOLS_ALLOW_UNSAFE_PATHS" - - -def _is_escape_hatch_enabled() -> bool: - """Check if the unsafe paths escape hatch is enabled.""" - return os.environ.get(_UNSAFE_PATHS_ENV, "").lower() in ("true", "1", "yes") - - -# --------------------------------------------------------------------------- -# File path validation -# --------------------------------------------------------------------------- - - -def validate_file_path(path: str, base_dir: str | None = None) -> str: - """Validate that a file path is safe to read. - - Resolves symlinks and ``..`` components, then checks that the resolved - path falls within *base_dir* (defaults to the current working directory). - - Args: - path: The file path to validate. - base_dir: Allowed root directory. Defaults to ``os.getcwd()``. - - Returns: - The resolved, validated absolute path. - - Raises: - ValueError: If the path escapes the allowed directory. - """ - if _is_escape_hatch_enabled(): - logger.warning( - "%s is enabled — skipping file path validation for: %s", - _UNSAFE_PATHS_ENV, - path, - ) - return os.path.realpath(path) - - if base_dir is None: - base_dir = os.getcwd() - - resolved_base = os.path.realpath(base_dir) - resolved_path = os.path.realpath( - os.path.join(resolved_base, path) if not os.path.isabs(path) else path - ) - - # Ensure the resolved path is within the base directory. - # When resolved_base already ends with a separator (e.g. the filesystem - # root "/"), appending os.sep would double it ("//"), so use the base - # as-is in that case. 
- prefix = resolved_base if resolved_base.endswith(os.sep) else resolved_base + os.sep - if not resolved_path.startswith(prefix) and resolved_path != resolved_base: - raise ValueError( - f"Path '{path}' resolves to '{resolved_path}' which is outside " - f"the allowed directory '{resolved_base}'. " - f"Set {_UNSAFE_PATHS_ENV}=true to bypass this check." - ) - - return resolved_path - - -def validate_directory_path(path: str, base_dir: str | None = None) -> str: - """Validate that a directory path is safe to read. - - Same as :func:`validate_file_path` but also checks that the path - is an existing directory. - - Args: - path: The directory path to validate. - base_dir: Allowed root directory. Defaults to ``os.getcwd()``. - - Returns: - The resolved, validated absolute path. - - Raises: - ValueError: If the path escapes the allowed directory or is not a directory. - """ - validated = validate_file_path(path, base_dir) - if not os.path.isdir(validated): - raise ValueError(f"Path '{validated}' is not a directory.") - return validated - - -# --------------------------------------------------------------------------- -# URL validation -# --------------------------------------------------------------------------- - -# Private and reserved IP ranges that should not be accessed -_BLOCKED_IPV4_NETWORKS = [ - ipaddress.ip_network("10.0.0.0/8"), - ipaddress.ip_network("172.16.0.0/12"), - ipaddress.ip_network("192.168.0.0/16"), - ipaddress.ip_network("127.0.0.0/8"), - ipaddress.ip_network("169.254.0.0/16"), # Link-local / cloud metadata - ipaddress.ip_network("0.0.0.0/32"), -] - -_BLOCKED_IPV6_NETWORKS = [ - ipaddress.ip_network("::1/128"), - ipaddress.ip_network("::/128"), - ipaddress.ip_network("fc00::/7"), # Unique local addresses - ipaddress.ip_network("fe80::/10"), # Link-local IPv6 -] - - -def _is_private_or_reserved(ip_str: str) -> bool: - """Check if an IP address is private, reserved, or otherwise unsafe.""" - try: - addr = ipaddress.ip_address(ip_str) - # Unwrap 
IPv4-mapped IPv6 addresses (e.g., ::ffff:127.0.0.1) to IPv4 - # so they are only checked against IPv4 networks (avoids TypeError when - # an IPv4Address is compared against an IPv6Network). - if isinstance(addr, ipaddress.IPv6Address) and addr.ipv4_mapped: - addr = addr.ipv4_mapped - networks = ( - _BLOCKED_IPV4_NETWORKS - if isinstance(addr, ipaddress.IPv4Address) - else _BLOCKED_IPV6_NETWORKS - ) - return any(addr in network for network in networks) - except ValueError: - return True # If we can't parse, block it - - -def validate_url(url: str) -> str: - """Validate that a URL is safe to fetch. - - Blocks ``file://`` scheme entirely. For ``http``/``https``, resolves - DNS and checks that the target IP is not private or reserved (prevents - SSRF to internal services and cloud metadata endpoints). - - Args: - url: The URL to validate. - - Returns: - The validated URL string. - - Raises: - ValueError: If the URL uses a blocked scheme or resolves to a - private/reserved IP address. - """ - if _is_escape_hatch_enabled(): - logger.warning( - "%s is enabled — skipping URL validation for: %s", - _UNSAFE_PATHS_ENV, - url, - ) - return url - - parsed = urlparse(url) - - # Block file:// scheme - if parsed.scheme == "file": - raise ValueError( - f"file:// URLs are not allowed: '{url}'. " - f"Use a file path instead, or set {_UNSAFE_PATHS_ENV}=true to bypass." - ) - - # Only allow http and https - if parsed.scheme not in ("http", "https"): - raise ValueError( - f"URL scheme '{parsed.scheme}' is not allowed. Only http and https are supported." 
- ) - - if not parsed.hostname: - raise ValueError(f"URL has no hostname: '{url}'") - - # Resolve DNS and check IPs - try: - addrinfos = socket.getaddrinfo( - parsed.hostname, parsed.port or (443 if parsed.scheme == "https" else 80) - ) - except socket.gaierror as exc: - raise ValueError(f"Could not resolve hostname: '{parsed.hostname}'") from exc - - for _family, _, _, _, sockaddr in addrinfos: - ip_str = str(sockaddr[0]) - if _is_private_or_reserved(ip_str): - raise ValueError( - f"URL '{url}' resolves to private/reserved IP {ip_str}. " - f"Access to internal networks is not allowed. " - f"Set {_UNSAFE_PATHS_ENV}=true to bypass." - ) - - return url +__all__ = ["validate_directory_path", "validate_file_path", "validate_url"] diff --git a/lib/crewai-tools/tests/utilities/test_safe_path.py b/lib/crewai-tools/tests/utilities/test_safe_path.py index 83e247292..4fb5d1ec7 100644 --- a/lib/crewai-tools/tests/utilities/test_safe_path.py +++ b/lib/crewai-tools/tests/utilities/test_safe_path.py @@ -6,7 +6,7 @@ import os import pytest -from crewai_tools.utilities.safe_path import ( +from crewai_tools.security.safe_path import ( validate_directory_path, validate_file_path, validate_url, From 1534ba202d0f335beef2d1039fd59a3e6c5cabc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Tue, 7 Apr 2026 10:45:39 -0700 Subject: [PATCH 15/21] feat: bump versions to 1.14.0 (#5321) --- lib/crewai-files/src/crewai_files/__init__.py | 2 +- lib/crewai-tools/pyproject.toml | 2 +- lib/crewai-tools/src/crewai_tools/__init__.py | 2 +- lib/crewai/pyproject.toml | 2 +- lib/crewai/src/crewai/__init__.py | 2 +- lib/crewai/src/crewai/cli/templates/crew/pyproject.toml | 2 +- lib/crewai/src/crewai/cli/templates/flow/pyproject.toml | 2 +- lib/crewai/src/crewai/cli/templates/tool/pyproject.toml | 2 +- lib/devtools/src/crewai_devtools/__init__.py | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/crewai-files/src/crewai_files/__init__.py 
b/lib/crewai-files/src/crewai_files/__init__.py index 9df9a3b65..7430288b5 100644 --- a/lib/crewai-files/src/crewai_files/__init__.py +++ b/lib/crewai-files/src/crewai_files/__init__.py @@ -152,4 +152,4 @@ __all__ = [ "wrap_file_source", ] -__version__ = "1.14.0a4" +__version__ = "1.14.0" diff --git a/lib/crewai-tools/pyproject.toml b/lib/crewai-tools/pyproject.toml index 6be197911..7653f9851 100644 --- a/lib/crewai-tools/pyproject.toml +++ b/lib/crewai-tools/pyproject.toml @@ -10,7 +10,7 @@ requires-python = ">=3.10, <3.14" dependencies = [ "pytube~=15.0.0", "requests~=2.32.5", - "crewai==1.14.0a4", + "crewai==1.14.0", "tiktoken~=0.8.0", "beautifulsoup4~=4.13.4", "python-docx~=1.2.0", diff --git a/lib/crewai-tools/src/crewai_tools/__init__.py b/lib/crewai-tools/src/crewai_tools/__init__.py index 5db3d05f1..2230e9afc 100644 --- a/lib/crewai-tools/src/crewai_tools/__init__.py +++ b/lib/crewai-tools/src/crewai_tools/__init__.py @@ -305,4 +305,4 @@ __all__ = [ "ZapierActionTools", ] -__version__ = "1.14.0a4" +__version__ = "1.14.0" diff --git a/lib/crewai/pyproject.toml b/lib/crewai/pyproject.toml index f845cd0a2..e883035c1 100644 --- a/lib/crewai/pyproject.toml +++ b/lib/crewai/pyproject.toml @@ -55,7 +55,7 @@ Repository = "https://github.com/crewAIInc/crewAI" [project.optional-dependencies] tools = [ - "crewai-tools==1.14.0a4", + "crewai-tools==1.14.0", ] embeddings = [ "tiktoken~=0.8.0" diff --git a/lib/crewai/src/crewai/__init__.py b/lib/crewai/src/crewai/__init__.py index 3df431554..1fdf84e70 100644 --- a/lib/crewai/src/crewai/__init__.py +++ b/lib/crewai/src/crewai/__init__.py @@ -46,7 +46,7 @@ def _suppress_pydantic_deprecation_warnings() -> None: _suppress_pydantic_deprecation_warnings() -__version__ = "1.14.0a4" +__version__ = "1.14.0" _telemetry_submitted = False diff --git a/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml b/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml index f2f9481be..0fabbb1b3 100644 --- 
a/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml +++ b/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml @@ -5,7 +5,7 @@ description = "{{name}} using crewAI" authors = [{ name = "Your Name", email = "you@example.com" }] requires-python = ">=3.10,<3.14" dependencies = [ - "crewai[tools]==1.14.0a4" + "crewai[tools]==1.14.0" ] [project.scripts] diff --git a/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml b/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml index 348e13f1b..e2f3e567e 100644 --- a/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml +++ b/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml @@ -5,7 +5,7 @@ description = "{{name}} using crewAI" authors = [{ name = "Your Name", email = "you@example.com" }] requires-python = ">=3.10,<3.14" dependencies = [ - "crewai[tools]==1.14.0a4" + "crewai[tools]==1.14.0" ] [project.scripts] diff --git a/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml b/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml index 43410c18f..7f65a59a0 100644 --- a/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml +++ b/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml @@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}" readme = "README.md" requires-python = ">=3.10,<3.14" dependencies = [ - "crewai[tools]==1.14.0a4" + "crewai[tools]==1.14.0" ] [tool.crewai] diff --git a/lib/devtools/src/crewai_devtools/__init__.py b/lib/devtools/src/crewai_devtools/__init__.py index 790ab4d18..54244d24f 100644 --- a/lib/devtools/src/crewai_devtools/__init__.py +++ b/lib/devtools/src/crewai_devtools/__init__.py @@ -1,3 +1,3 @@ """CrewAI development tools.""" -__version__ = "1.14.0a4" +__version__ = "1.14.0" From 25eb4adc4922a0129ec9405a34f1318f2bb48937 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Tue, 7 Apr 2026 10:47:34 -0700 Subject: [PATCH 16/21] docs: update changelog and version for v1.14.0 (#5322) --- docs/ar/changelog.mdx | 38 + docs/docs.json | 3361 
+++++++++++++++++++++++++++++--------- docs/en/changelog.mdx | 38 + docs/ko/changelog.mdx | 38 + docs/pt-BR/changelog.mdx | 38 + 5 files changed, 2766 insertions(+), 747 deletions(-) diff --git a/docs/ar/changelog.mdx b/docs/ar/changelog.mdx index 5f5482eb7..b2f335d6c 100644 --- a/docs/ar/changelog.mdx +++ b/docs/ar/changelog.mdx @@ -4,6 +4,44 @@ description: "تحديثات المنتج والتحسينات وإصلاحات icon: "clock" mode: "wide" --- + + ## v1.14.0 + + [عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0) + + ## ما الذي تغير + + ### الميزات + - إضافة أوامر CLI لقائمة/معلومات نقاط التحقق + - إضافة guardrail_type و name لتمييز التتبع + - إضافة SqliteProvider لتخزين نقاط التحقق + - إضافة CheckpointConfig للتسجيل التلقائي لنقاط التحقق + - تنفيذ تسجيل حالة وقت التشغيل، نظام الأحداث، وإعادة هيكلة المنفذ + + ### إصلاحات الأخطاء + - إضافة حماية من SSRF وتجاوز المسار + - إضافة التحقق من المسار وعنوان URL لأدوات RAG + - استبعاد متجهات التضمين من تسلسل الذاكرة لتوفير الرموز + - التأكد من وجود دليل الإخراج قبل الكتابة في قالب التدفق + - رفع litellm إلى >=1.83.0 لمعالجة CVE-2026-35030 + - إزالة حقل فهرسة SEO الذي يتسبب في عرض الصفحة العربية بشكل غير صحيح + + ### الوثائق + - تحديث سجل التغييرات والإصدار لـ v1.14.0 + - تحديث أدلة البدء السريع والتثبيت لتحسين الوضوح + - إضافة قسم مزودي التخزين، تصدير JsonProvider + - إضافة دليل علامة AMP التدريبية + + ### إعادة الهيكلة + - تنظيف واجهة برمجة تطبيقات نقاط التحقق + - إزالة CodeInterpreterTool وإهمال معلمات تنفيذ الكود + + ## المساهمون + + @alex-clawd, @github-actions[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide + + + ## v1.14.0a4 diff --git a/docs/docs.json b/docs/docs.json index 2fea532ef..9d2679a19 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -56,7 +56,7 @@ }, "versions": [ { - "version": "v1.13.0", + "version": "v1.14.0", "default": true, "tabs": [ { @@ -528,6 +528,478 @@ } ] }, + { + "version": "v1.13.0", + "tabs": [ + { + "tab": "Home", + "icon": "house", + "groups": [ + { 
+ "group": "Welcome", + "pages": [ + "index" + ] + } + ] + }, + { + "tab": "Documentation", + "icon": "book-open", + "groups": [ + { + "group": "Get Started", + "pages": [ + "en/introduction", + "en/installation", + "en/quickstart" + ] + }, + { + "group": "Guides", + "pages": [ + { + "group": "Strategy", + "icon": "compass", + "pages": [ + "en/guides/concepts/evaluating-use-cases" + ] + }, + { + "group": "Agents", + "icon": "user", + "pages": [ + "en/guides/agents/crafting-effective-agents" + ] + }, + { + "group": "Crews", + "icon": "users", + "pages": [ + "en/guides/crews/first-crew" + ] + }, + { + "group": "Flows", + "icon": "code-branch", + "pages": [ + "en/guides/flows/first-flow", + "en/guides/flows/mastering-flow-state" + ] + }, + { + "group": "Tools", + "icon": "wrench", + "pages": [ + "en/guides/tools/publish-custom-tools" + ] + }, + { + "group": "Coding Tools", + "icon": "terminal", + "pages": [ + "en/guides/coding-tools/agents-md" + ] + }, + { + "group": "Advanced", + "icon": "gear", + "pages": [ + "en/guides/advanced/customizing-prompts", + "en/guides/advanced/fingerprinting" + ] + }, + { + "group": "Migration", + "icon": "shuffle", + "pages": [ + "en/guides/migration/migrating-from-langgraph" + ] + } + ] + }, + { + "group": "Core Concepts", + "pages": [ + "en/concepts/agents", + "en/concepts/agent-capabilities", + "en/concepts/tasks", + "en/concepts/crews", + "en/concepts/flows", + "en/concepts/production-architecture", + "en/concepts/knowledge", + "en/concepts/skills", + "en/concepts/llms", + "en/concepts/files", + "en/concepts/processes", + "en/concepts/collaboration", + "en/concepts/training", + "en/concepts/memory", + "en/concepts/reasoning", + "en/concepts/planning", + "en/concepts/testing", + "en/concepts/cli", + "en/concepts/tools", + "en/concepts/event-listener", + "en/concepts/checkpointing" + ] + }, + { + "group": "MCP Integration", + "pages": [ + "en/mcp/overview", + "en/mcp/dsl-integration", + "en/mcp/stdio", + "en/mcp/sse", + 
"en/mcp/streamable-http", + "en/mcp/multiple-servers", + "en/mcp/security" + ] + }, + { + "group": "Tools", + "pages": [ + "en/tools/overview", + { + "group": "File & Document", + "icon": "folder-open", + "pages": [ + "en/tools/file-document/overview", + "en/tools/file-document/filereadtool", + "en/tools/file-document/filewritetool", + "en/tools/file-document/pdfsearchtool", + "en/tools/file-document/docxsearchtool", + "en/tools/file-document/mdxsearchtool", + "en/tools/file-document/xmlsearchtool", + "en/tools/file-document/txtsearchtool", + "en/tools/file-document/jsonsearchtool", + "en/tools/file-document/csvsearchtool", + "en/tools/file-document/directorysearchtool", + "en/tools/file-document/directoryreadtool", + "en/tools/file-document/ocrtool", + "en/tools/file-document/pdf-text-writing-tool" + ] + }, + { + "group": "Web Scraping & Browsing", + "icon": "globe", + "pages": [ + "en/tools/web-scraping/overview", + "en/tools/web-scraping/scrapewebsitetool", + "en/tools/web-scraping/scrapeelementfromwebsitetool", + "en/tools/web-scraping/scrapflyscrapetool", + "en/tools/web-scraping/seleniumscrapingtool", + "en/tools/web-scraping/scrapegraphscrapetool", + "en/tools/web-scraping/spidertool", + "en/tools/web-scraping/browserbaseloadtool", + "en/tools/web-scraping/hyperbrowserloadtool", + "en/tools/web-scraping/stagehandtool", + "en/tools/web-scraping/firecrawlcrawlwebsitetool", + "en/tools/web-scraping/firecrawlscrapewebsitetool", + "en/tools/web-scraping/oxylabsscraperstool", + "en/tools/web-scraping/brightdata-tools" + ] + }, + { + "group": "Search & Research", + "icon": "magnifying-glass", + "pages": [ + "en/tools/search-research/overview", + "en/tools/search-research/serperdevtool", + "en/tools/search-research/bravesearchtool", + "en/tools/search-research/exasearchtool", + "en/tools/search-research/linkupsearchtool", + "en/tools/search-research/githubsearchtool", + "en/tools/search-research/websitesearchtool", + "en/tools/search-research/codedocssearchtool", + 
"en/tools/search-research/youtubechannelsearchtool", + "en/tools/search-research/youtubevideosearchtool", + "en/tools/search-research/tavilysearchtool", + "en/tools/search-research/tavilyextractortool", + "en/tools/search-research/arxivpapertool", + "en/tools/search-research/serpapi-googlesearchtool", + "en/tools/search-research/serpapi-googleshoppingtool", + "en/tools/search-research/databricks-query-tool" + ] + }, + { + "group": "Database & Data", + "icon": "database", + "pages": [ + "en/tools/database-data/overview", + "en/tools/database-data/mysqltool", + "en/tools/database-data/pgsearchtool", + "en/tools/database-data/snowflakesearchtool", + "en/tools/database-data/nl2sqltool", + "en/tools/database-data/qdrantvectorsearchtool", + "en/tools/database-data/weaviatevectorsearchtool", + "en/tools/database-data/mongodbvectorsearchtool", + "en/tools/database-data/singlestoresearchtool" + ] + }, + { + "group": "AI & Machine Learning", + "icon": "brain", + "pages": [ + "en/tools/ai-ml/overview", + "en/tools/ai-ml/dalletool", + "en/tools/ai-ml/visiontool", + "en/tools/ai-ml/aimindtool", + "en/tools/ai-ml/llamaindextool", + "en/tools/ai-ml/langchaintool", + "en/tools/ai-ml/ragtool", + "en/tools/ai-ml/codeinterpretertool" + ] + }, + { + "group": "Cloud & Storage", + "icon": "cloud", + "pages": [ + "en/tools/cloud-storage/overview", + "en/tools/cloud-storage/s3readertool", + "en/tools/cloud-storage/s3writertool", + "en/tools/cloud-storage/bedrockkbretriever" + ] + }, + { + "group": "Integrations", + "icon": "plug", + "pages": [ + "en/tools/integration/overview", + "en/tools/integration/bedrockinvokeagenttool", + "en/tools/integration/crewaiautomationtool", + "en/tools/integration/mergeagenthandlertool" + ] + }, + { + "group": "Automation", + "icon": "bolt", + "pages": [ + "en/tools/automation/overview", + "en/tools/automation/apifyactorstool", + "en/tools/automation/composiotool", + "en/tools/automation/multiontool", + "en/tools/automation/zapieractionstool" + ] + } + ] + 
}, + { + "group": "Observability", + "pages": [ + "en/observability/tracing", + "en/observability/overview", + "en/observability/arize-phoenix", + "en/observability/braintrust", + "en/observability/datadog", + "en/observability/galileo", + "en/observability/langdb", + "en/observability/langfuse", + "en/observability/langtrace", + "en/observability/maxim", + "en/observability/mlflow", + "en/observability/neatlogs", + "en/observability/openlit", + "en/observability/opik", + "en/observability/patronus-evaluation", + "en/observability/portkey", + "en/observability/weave", + "en/observability/truefoundry" + ] + }, + { + "group": "Learn", + "pages": [ + "en/learn/overview", + "en/learn/llm-selection-guide", + "en/learn/conditional-tasks", + "en/learn/coding-agents", + "en/learn/create-custom-tools", + "en/learn/custom-llm", + "en/learn/custom-manager-agent", + "en/learn/customizing-agents", + "en/learn/dalle-image-generation", + "en/learn/force-tool-output-as-result", + "en/learn/hierarchical-process", + "en/learn/human-input-on-execution", + "en/learn/human-in-the-loop", + "en/learn/human-feedback-in-flows", + "en/learn/kickoff-async", + "en/learn/kickoff-for-each", + "en/learn/llm-connections", + "en/learn/litellm-removal-guide", + "en/learn/multimodal-agents", + "en/learn/replay-tasks-from-latest-crew-kickoff", + "en/learn/sequential-process", + "en/learn/using-annotations", + "en/learn/execution-hooks", + "en/learn/llm-hooks", + "en/learn/tool-hooks" + ] + }, + { + "group": "Telemetry", + "pages": [ + "en/telemetry" + ] + } + ] + }, + { + "tab": "AMP", + "icon": "briefcase", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "en/enterprise/introduction" + ] + }, + { + "group": "Build", + "pages": [ + "en/enterprise/features/automations", + "en/enterprise/features/crew-studio", + "en/enterprise/features/marketplace", + "en/enterprise/features/agent-repositories", + "en/enterprise/features/tools-and-integrations", + 
"en/enterprise/features/pii-trace-redactions" + ] + }, + { + "group": "Operate", + "pages": [ + "en/enterprise/features/traces", + "en/enterprise/features/webhook-streaming", + "en/enterprise/features/hallucination-guardrail", + "en/enterprise/features/flow-hitl-management" + ] + }, + { + "group": "Manage", + "pages": [ + "en/enterprise/features/sso", + "en/enterprise/features/rbac" + ] + }, + { + "group": "Integration Docs", + "pages": [ + "en/enterprise/integrations/asana", + "en/enterprise/integrations/box", + "en/enterprise/integrations/clickup", + "en/enterprise/integrations/github", + "en/enterprise/integrations/gmail", + "en/enterprise/integrations/google_calendar", + "en/enterprise/integrations/google_contacts", + "en/enterprise/integrations/google_docs", + "en/enterprise/integrations/google_drive", + "en/enterprise/integrations/google_sheets", + "en/enterprise/integrations/google_slides", + "en/enterprise/integrations/hubspot", + "en/enterprise/integrations/jira", + "en/enterprise/integrations/linear", + "en/enterprise/integrations/microsoft_excel", + "en/enterprise/integrations/microsoft_onedrive", + "en/enterprise/integrations/microsoft_outlook", + "en/enterprise/integrations/microsoft_sharepoint", + "en/enterprise/integrations/microsoft_teams", + "en/enterprise/integrations/microsoft_word", + "en/enterprise/integrations/notion", + "en/enterprise/integrations/salesforce", + "en/enterprise/integrations/shopify", + "en/enterprise/integrations/slack", + "en/enterprise/integrations/stripe", + "en/enterprise/integrations/zendesk" + ] + }, + { + "group": "Triggers", + "pages": [ + "en/enterprise/guides/automation-triggers", + "en/enterprise/guides/gmail-trigger", + "en/enterprise/guides/google-calendar-trigger", + "en/enterprise/guides/google-drive-trigger", + "en/enterprise/guides/outlook-trigger", + "en/enterprise/guides/onedrive-trigger", + "en/enterprise/guides/microsoft-teams-trigger", + "en/enterprise/guides/slack-trigger", + 
"en/enterprise/guides/hubspot-trigger", + "en/enterprise/guides/salesforce-trigger", + "en/enterprise/guides/zapier-trigger" + ] + }, + { + "group": "How-To Guides", + "pages": [ + "en/enterprise/guides/build-crew", + "en/enterprise/guides/prepare-for-deployment", + "en/enterprise/guides/deploy-to-amp", + "en/enterprise/guides/private-package-registry", + "en/enterprise/guides/kickoff-crew", + "en/enterprise/guides/update-crew", + "en/enterprise/guides/enable-crew-studio", + "en/enterprise/guides/capture_telemetry_logs", + "en/enterprise/guides/azure-openai-setup", + "en/enterprise/guides/tool-repository", + "en/enterprise/guides/custom-mcp-server", + "en/enterprise/guides/react-component-export", + "en/enterprise/guides/team-management", + "en/enterprise/guides/human-in-the-loop", + "en/enterprise/guides/webhook-automation" + ] + }, + { + "group": "Resources", + "pages": [ + "en/enterprise/resources/frequently-asked-questions" + ] + } + ] + }, + { + "tab": "API Reference", + "icon": "magnifying-glass", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "en/api-reference/introduction", + "en/api-reference/inputs", + "en/api-reference/kickoff", + "en/api-reference/resume", + "en/api-reference/status" + ] + } + ] + }, + { + "tab": "Examples", + "icon": "code", + "groups": [ + { + "group": "Examples", + "pages": [ + "en/examples/example", + "en/examples/cookbooks" + ] + } + ] + }, + { + "tab": "Changelog", + "icon": "clock", + "groups": [ + { + "group": "Release Notes", + "pages": [ + "en/changelog" + ] + } + ] + } + ] + }, { "version": "v1.12.2", "tabs": [ @@ -3838,7 +4310,7 @@ "icon": "globe" }, { - "anchor": "F\u00f3rum", + "anchor": "Fórum", "href": "https://community.crewai.com", "icon": "discourse" }, @@ -3856,11 +4328,11 @@ }, "versions": [ { - "version": "v1.13.0", + "version": "v1.14.0", "default": true, "tabs": [ { - "tab": "In\u00edcio", + "tab": "Início", "icon": "house", "groups": [ { @@ -3872,11 +4344,11 @@ ] }, { - "tab": 
"Documenta\u00e7\u00e3o", + "tab": "Documentação", "icon": "book-open", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -3887,7 +4359,7 @@ "group": "Guias", "pages": [ { - "group": "Estrat\u00e9gia", + "group": "Estratégia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -3923,14 +4395,14 @@ ] }, { - "group": "Ferramentas de Codifica\u00e7\u00e3o", + "group": "Ferramentas de Codificação", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avan\u00e7ado", + "group": "Avançado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -3938,7 +4410,7 @@ ] }, { - "group": "Migra\u00e7\u00e3o", + "group": "Migração", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -3973,7 +4445,7 @@ ] }, { - "group": "Integra\u00e7\u00e3o MCP", + "group": "Integração MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -4007,7 +4479,7 @@ ] }, { - "group": "Web Scraping & Navega\u00e7\u00e3o", + "group": "Web Scraping & Navegação", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -4088,7 +4560,7 @@ ] }, { - "group": "Automa\u00e7\u00e3o", + "group": "Automação", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -4163,7 +4635,7 @@ "icon": "briefcase", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -4195,7 +4667,7 @@ ] }, { - "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", + "group": "Documentação de Integração", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -4271,11 +4743,11 @@ ] }, { - "tab": "Refer\u00eancia da API", + "tab": "Referência da API", "icon": "magnifying-glass", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ 
-4300,11 +4772,468 @@ ] }, { - "tab": "Notas de Vers\u00e3o", + "tab": "Notas de Versão", "icon": "clock", "groups": [ { - "group": "Notas de Vers\u00e3o", + "group": "Notas de Versão", + "pages": [ + "pt-BR/changelog" + ] + } + ] + } + ] + }, + { + "version": "v1.13.0", + "tabs": [ + { + "tab": "Início", + "icon": "house", + "groups": [ + { + "group": "Bem-vindo", + "pages": [ + "pt-BR/index" + ] + } + ] + }, + { + "tab": "Documentação", + "icon": "book-open", + "groups": [ + { + "group": "Começando", + "pages": [ + "pt-BR/introduction", + "pt-BR/installation", + "pt-BR/quickstart" + ] + }, + { + "group": "Guias", + "pages": [ + { + "group": "Estratégia", + "icon": "compass", + "pages": [ + "pt-BR/guides/concepts/evaluating-use-cases" + ] + }, + { + "group": "Agentes", + "icon": "user", + "pages": [ + "pt-BR/guides/agents/crafting-effective-agents" + ] + }, + { + "group": "Crews", + "icon": "users", + "pages": [ + "pt-BR/guides/crews/first-crew" + ] + }, + { + "group": "Flows", + "icon": "code-branch", + "pages": [ + "pt-BR/guides/flows/first-flow", + "pt-BR/guides/flows/mastering-flow-state" + ] + }, + { + "group": "Ferramentas", + "icon": "wrench", + "pages": [ + "pt-BR/guides/tools/publish-custom-tools" + ] + }, + { + "group": "Ferramentas de Codificação", + "icon": "terminal", + "pages": [ + "pt-BR/guides/coding-tools/agents-md" + ] + }, + { + "group": "Avançado", + "icon": "gear", + "pages": [ + "pt-BR/guides/advanced/customizing-prompts", + "pt-BR/guides/advanced/fingerprinting" + ] + }, + { + "group": "Migração", + "icon": "shuffle", + "pages": [ + "pt-BR/guides/migration/migrating-from-langgraph" + ] + } + ] + }, + { + "group": "Conceitos-Chave", + "pages": [ + "pt-BR/concepts/agents", + "pt-BR/concepts/agent-capabilities", + "pt-BR/concepts/tasks", + "pt-BR/concepts/crews", + "pt-BR/concepts/flows", + "pt-BR/concepts/production-architecture", + "pt-BR/concepts/knowledge", + "pt-BR/concepts/skills", + "pt-BR/concepts/llms", + "pt-BR/concepts/files", + 
"pt-BR/concepts/processes", + "pt-BR/concepts/collaboration", + "pt-BR/concepts/training", + "pt-BR/concepts/memory", + "pt-BR/concepts/reasoning", + "pt-BR/concepts/planning", + "pt-BR/concepts/testing", + "pt-BR/concepts/cli", + "pt-BR/concepts/tools", + "pt-BR/concepts/event-listener", + "pt-BR/concepts/checkpointing" + ] + }, + { + "group": "Integração MCP", + "pages": [ + "pt-BR/mcp/overview", + "pt-BR/mcp/dsl-integration", + "pt-BR/mcp/stdio", + "pt-BR/mcp/sse", + "pt-BR/mcp/streamable-http", + "pt-BR/mcp/multiple-servers", + "pt-BR/mcp/security" + ] + }, + { + "group": "Ferramentas", + "pages": [ + "pt-BR/tools/overview", + { + "group": "Arquivo & Documento", + "icon": "folder-open", + "pages": [ + "pt-BR/tools/file-document/overview", + "pt-BR/tools/file-document/filereadtool", + "pt-BR/tools/file-document/filewritetool", + "pt-BR/tools/file-document/pdfsearchtool", + "pt-BR/tools/file-document/docxsearchtool", + "pt-BR/tools/file-document/mdxsearchtool", + "pt-BR/tools/file-document/xmlsearchtool", + "pt-BR/tools/file-document/txtsearchtool", + "pt-BR/tools/file-document/jsonsearchtool", + "pt-BR/tools/file-document/csvsearchtool", + "pt-BR/tools/file-document/directorysearchtool", + "pt-BR/tools/file-document/directoryreadtool" + ] + }, + { + "group": "Web Scraping & Navegação", + "icon": "globe", + "pages": [ + "pt-BR/tools/web-scraping/overview", + "pt-BR/tools/web-scraping/scrapewebsitetool", + "pt-BR/tools/web-scraping/scrapeelementfromwebsitetool", + "pt-BR/tools/web-scraping/scrapflyscrapetool", + "pt-BR/tools/web-scraping/seleniumscrapingtool", + "pt-BR/tools/web-scraping/scrapegraphscrapetool", + "pt-BR/tools/web-scraping/spidertool", + "pt-BR/tools/web-scraping/browserbaseloadtool", + "pt-BR/tools/web-scraping/hyperbrowserloadtool", + "pt-BR/tools/web-scraping/stagehandtool", + "pt-BR/tools/web-scraping/firecrawlcrawlwebsitetool", + "pt-BR/tools/web-scraping/firecrawlscrapewebsitetool", + "pt-BR/tools/web-scraping/oxylabsscraperstool" + ] + }, + 
{ + "group": "Pesquisa", + "icon": "magnifying-glass", + "pages": [ + "pt-BR/tools/search-research/overview", + "pt-BR/tools/search-research/serperdevtool", + "pt-BR/tools/search-research/bravesearchtool", + "pt-BR/tools/search-research/exasearchtool", + "pt-BR/tools/search-research/linkupsearchtool", + "pt-BR/tools/search-research/githubsearchtool", + "pt-BR/tools/search-research/websitesearchtool", + "pt-BR/tools/search-research/codedocssearchtool", + "pt-BR/tools/search-research/youtubechannelsearchtool", + "pt-BR/tools/search-research/youtubevideosearchtool" + ] + }, + { + "group": "Dados", + "icon": "database", + "pages": [ + "pt-BR/tools/database-data/overview", + "pt-BR/tools/database-data/mysqltool", + "pt-BR/tools/database-data/pgsearchtool", + "pt-BR/tools/database-data/snowflakesearchtool", + "pt-BR/tools/database-data/nl2sqltool", + "pt-BR/tools/database-data/qdrantvectorsearchtool", + "pt-BR/tools/database-data/weaviatevectorsearchtool" + ] + }, + { + "group": "IA & Machine Learning", + "icon": "brain", + "pages": [ + "pt-BR/tools/ai-ml/overview", + "pt-BR/tools/ai-ml/dalletool", + "pt-BR/tools/ai-ml/visiontool", + "pt-BR/tools/ai-ml/aimindtool", + "pt-BR/tools/ai-ml/llamaindextool", + "pt-BR/tools/ai-ml/langchaintool", + "pt-BR/tools/ai-ml/ragtool", + "pt-BR/tools/ai-ml/codeinterpretertool" + ] + }, + { + "group": "Cloud & Armazenamento", + "icon": "cloud", + "pages": [ + "pt-BR/tools/cloud-storage/overview", + "pt-BR/tools/cloud-storage/s3readertool", + "pt-BR/tools/cloud-storage/s3writertool", + "pt-BR/tools/cloud-storage/bedrockkbretriever" + ] + }, + { + "group": "Integrations", + "icon": "plug", + "pages": [ + "pt-BR/tools/integration/overview", + "pt-BR/tools/integration/bedrockinvokeagenttool", + "pt-BR/tools/integration/crewaiautomationtool" + ] + }, + { + "group": "Automação", + "icon": "bolt", + "pages": [ + "pt-BR/tools/automation/overview", + "pt-BR/tools/automation/apifyactorstool", + "pt-BR/tools/automation/composiotool", + 
"pt-BR/tools/automation/multiontool" + ] + } + ] + }, + { + "group": "Observabilidade", + "pages": [ + "pt-BR/observability/tracing", + "pt-BR/observability/overview", + "pt-BR/observability/arize-phoenix", + "pt-BR/observability/braintrust", + "pt-BR/observability/datadog", + "pt-BR/observability/galileo", + "pt-BR/observability/langdb", + "pt-BR/observability/langfuse", + "pt-BR/observability/langtrace", + "pt-BR/observability/maxim", + "pt-BR/observability/mlflow", + "pt-BR/observability/openlit", + "pt-BR/observability/opik", + "pt-BR/observability/patronus-evaluation", + "pt-BR/observability/portkey", + "pt-BR/observability/weave", + "pt-BR/observability/truefoundry" + ] + }, + { + "group": "Aprenda", + "pages": [ + "pt-BR/learn/overview", + "pt-BR/learn/llm-selection-guide", + "pt-BR/learn/conditional-tasks", + "pt-BR/learn/coding-agents", + "pt-BR/learn/create-custom-tools", + "pt-BR/learn/custom-llm", + "pt-BR/learn/custom-manager-agent", + "pt-BR/learn/customizing-agents", + "pt-BR/learn/dalle-image-generation", + "pt-BR/learn/force-tool-output-as-result", + "pt-BR/learn/hierarchical-process", + "pt-BR/learn/human-input-on-execution", + "pt-BR/learn/human-in-the-loop", + "pt-BR/learn/human-feedback-in-flows", + "pt-BR/learn/kickoff-async", + "pt-BR/learn/kickoff-for-each", + "pt-BR/learn/llm-connections", + "pt-BR/learn/multimodal-agents", + "pt-BR/learn/replay-tasks-from-latest-crew-kickoff", + "pt-BR/learn/sequential-process", + "pt-BR/learn/using-annotations", + "pt-BR/learn/execution-hooks", + "pt-BR/learn/llm-hooks", + "pt-BR/learn/tool-hooks" + ] + }, + { + "group": "Telemetria", + "pages": [ + "pt-BR/telemetry" + ] + } + ] + }, + { + "tab": "AMP", + "icon": "briefcase", + "groups": [ + { + "group": "Começando", + "pages": [ + "pt-BR/enterprise/introduction" + ] + }, + { + "group": "Construir", + "pages": [ + "pt-BR/enterprise/features/automations", + "pt-BR/enterprise/features/crew-studio", + "pt-BR/enterprise/features/marketplace", + 
"pt-BR/enterprise/features/agent-repositories", + "pt-BR/enterprise/features/tools-and-integrations", + "pt-BR/enterprise/features/pii-trace-redactions" + ] + }, + { + "group": "Operar", + "pages": [ + "pt-BR/enterprise/features/traces", + "pt-BR/enterprise/features/webhook-streaming", + "pt-BR/enterprise/features/hallucination-guardrail", + "pt-BR/enterprise/features/flow-hitl-management" + ] + }, + { + "group": "Gerenciar", + "pages": [ + "pt-BR/enterprise/features/rbac" + ] + }, + { + "group": "Documentação de Integração", + "pages": [ + "pt-BR/enterprise/integrations/asana", + "pt-BR/enterprise/integrations/box", + "pt-BR/enterprise/integrations/clickup", + "pt-BR/enterprise/integrations/github", + "pt-BR/enterprise/integrations/gmail", + "pt-BR/enterprise/integrations/google_calendar", + "pt-BR/enterprise/integrations/google_contacts", + "pt-BR/enterprise/integrations/google_docs", + "pt-BR/enterprise/integrations/google_drive", + "pt-BR/enterprise/integrations/google_sheets", + "pt-BR/enterprise/integrations/google_slides", + "pt-BR/enterprise/integrations/hubspot", + "pt-BR/enterprise/integrations/jira", + "pt-BR/enterprise/integrations/linear", + "pt-BR/enterprise/integrations/microsoft_excel", + "pt-BR/enterprise/integrations/microsoft_onedrive", + "pt-BR/enterprise/integrations/microsoft_outlook", + "pt-BR/enterprise/integrations/microsoft_sharepoint", + "pt-BR/enterprise/integrations/microsoft_teams", + "pt-BR/enterprise/integrations/microsoft_word", + "pt-BR/enterprise/integrations/notion", + "pt-BR/enterprise/integrations/salesforce", + "pt-BR/enterprise/integrations/shopify", + "pt-BR/enterprise/integrations/slack", + "pt-BR/enterprise/integrations/stripe", + "pt-BR/enterprise/integrations/zendesk" + ] + }, + { + "group": "Guias", + "pages": [ + "pt-BR/enterprise/guides/build-crew", + "pt-BR/enterprise/guides/prepare-for-deployment", + "pt-BR/enterprise/guides/deploy-to-amp", + "pt-BR/enterprise/guides/private-package-registry", + 
"pt-BR/enterprise/guides/kickoff-crew", + "pt-BR/enterprise/guides/training-crews", + "pt-BR/enterprise/guides/update-crew", + "pt-BR/enterprise/guides/enable-crew-studio", + "pt-BR/enterprise/guides/capture_telemetry_logs", + "pt-BR/enterprise/guides/azure-openai-setup", + "pt-BR/enterprise/guides/tool-repository", + "pt-BR/enterprise/guides/custom-mcp-server", + "pt-BR/enterprise/guides/react-component-export", + "pt-BR/enterprise/guides/team-management", + "pt-BR/enterprise/guides/human-in-the-loop", + "pt-BR/enterprise/guides/webhook-automation" + ] + }, + { + "group": "Triggers", + "pages": [ + "pt-BR/enterprise/guides/automation-triggers", + "pt-BR/enterprise/guides/gmail-trigger", + "pt-BR/enterprise/guides/google-calendar-trigger", + "pt-BR/enterprise/guides/google-drive-trigger", + "pt-BR/enterprise/guides/outlook-trigger", + "pt-BR/enterprise/guides/onedrive-trigger", + "pt-BR/enterprise/guides/microsoft-teams-trigger", + "pt-BR/enterprise/guides/slack-trigger", + "pt-BR/enterprise/guides/hubspot-trigger", + "pt-BR/enterprise/guides/salesforce-trigger", + "pt-BR/enterprise/guides/zapier-trigger" + ] + }, + { + "group": "Recursos", + "pages": [ + "pt-BR/enterprise/resources/frequently-asked-questions" + ] + } + ] + }, + { + "tab": "Referência da API", + "icon": "magnifying-glass", + "groups": [ + { + "group": "Começando", + "pages": [ + "pt-BR/api-reference/introduction", + "pt-BR/api-reference/inputs", + "pt-BR/api-reference/kickoff", + "pt-BR/api-reference/resume", + "pt-BR/api-reference/status" + ] + } + ] + }, + { + "tab": "Exemplos", + "icon": "code", + "groups": [ + { + "group": "Exemplos", + "pages": [ + "pt-BR/examples/example", + "pt-BR/examples/cookbooks" + ] + } + ] + }, + { + "tab": "Notas de Versão", + "icon": "clock", + "groups": [ + { + "group": "Notas de Versão", "pages": [ "pt-BR/changelog" ] @@ -4317,7 +5246,7 @@ "version": "v1.12.2", "tabs": [ { - "tab": "In\u00edcio", + "tab": "Início", "icon": "house", "groups": [ { @@ -4329,11 
+5258,11 @@ ] }, { - "tab": "Documenta\u00e7\u00e3o", + "tab": "Documentação", "icon": "book-open", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -4344,7 +5273,7 @@ "group": "Guias", "pages": [ { - "group": "Estrat\u00e9gia", + "group": "Estratégia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -4380,14 +5309,14 @@ ] }, { - "group": "Ferramentas de Codifica\u00e7\u00e3o", + "group": "Ferramentas de Codificação", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avan\u00e7ado", + "group": "Avançado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -4395,7 +5324,7 @@ ] }, { - "group": "Migra\u00e7\u00e3o", + "group": "Migração", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -4430,7 +5359,7 @@ ] }, { - "group": "Integra\u00e7\u00e3o MCP", + "group": "Integração MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -4464,7 +5393,7 @@ ] }, { - "group": "Web Scraping & Navega\u00e7\u00e3o", + "group": "Web Scraping & Navegação", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -4545,7 +5474,7 @@ ] }, { - "group": "Automa\u00e7\u00e3o", + "group": "Automação", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -4620,7 +5549,7 @@ "icon": "briefcase", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -4652,7 +5581,7 @@ ] }, { - "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", + "group": "Documentação de Integração", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -4728,11 +5657,11 @@ ] }, { - "tab": "Refer\u00eancia da API", + "tab": "Referência da API", "icon": "magnifying-glass", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/api-reference/introduction", 
"pt-BR/api-reference/inputs", @@ -4757,11 +5686,11 @@ ] }, { - "tab": "Notas de Vers\u00e3o", + "tab": "Notas de Versão", "icon": "clock", "groups": [ { - "group": "Notas de Vers\u00e3o", + "group": "Notas de Versão", "pages": [ "pt-BR/changelog" ] @@ -4774,7 +5703,7 @@ "version": "v1.12.1", "tabs": [ { - "tab": "In\u00edcio", + "tab": "Início", "icon": "house", "groups": [ { @@ -4786,11 +5715,11 @@ ] }, { - "tab": "Documenta\u00e7\u00e3o", + "tab": "Documentação", "icon": "book-open", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -4801,7 +5730,7 @@ "group": "Guias", "pages": [ { - "group": "Estrat\u00e9gia", + "group": "Estratégia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -4837,14 +5766,14 @@ ] }, { - "group": "Ferramentas de Codifica\u00e7\u00e3o", + "group": "Ferramentas de Codificação", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avan\u00e7ado", + "group": "Avançado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -4852,7 +5781,7 @@ ] }, { - "group": "Migra\u00e7\u00e3o", + "group": "Migração", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -4886,7 +5815,7 @@ ] }, { - "group": "Integra\u00e7\u00e3o MCP", + "group": "Integração MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -4920,7 +5849,7 @@ ] }, { - "group": "Web Scraping & Navega\u00e7\u00e3o", + "group": "Web Scraping & Navegação", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -5001,7 +5930,7 @@ ] }, { - "group": "Automa\u00e7\u00e3o", + "group": "Automação", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -5076,7 +6005,7 @@ "icon": "briefcase", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -5108,7 +6037,7 @@ ] }, { - "group": "Documenta\u00e7\u00e3o de 
Integra\u00e7\u00e3o", + "group": "Documentação de Integração", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -5184,11 +6113,11 @@ ] }, { - "tab": "Refer\u00eancia da API", + "tab": "Referência da API", "icon": "magnifying-glass", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -5213,11 +6142,11 @@ ] }, { - "tab": "Notas de Vers\u00e3o", + "tab": "Notas de Versão", "icon": "clock", "groups": [ { - "group": "Notas de Vers\u00e3o", + "group": "Notas de Versão", "pages": [ "pt-BR/changelog" ] @@ -5230,7 +6159,7 @@ "version": "v1.12.0", "tabs": [ { - "tab": "In\u00edcio", + "tab": "Início", "icon": "house", "groups": [ { @@ -5242,11 +6171,11 @@ ] }, { - "tab": "Documenta\u00e7\u00e3o", + "tab": "Documentação", "icon": "book-open", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -5257,7 +6186,7 @@ "group": "Guias", "pages": [ { - "group": "Estrat\u00e9gia", + "group": "Estratégia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -5293,14 +6222,14 @@ ] }, { - "group": "Ferramentas de Codifica\u00e7\u00e3o", + "group": "Ferramentas de Codificação", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avan\u00e7ado", + "group": "Avançado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -5308,7 +6237,7 @@ ] }, { - "group": "Migra\u00e7\u00e3o", + "group": "Migração", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -5342,7 +6271,7 @@ ] }, { - "group": "Integra\u00e7\u00e3o MCP", + "group": "Integração MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -5376,7 +6305,7 @@ ] }, { - "group": "Web Scraping & Navega\u00e7\u00e3o", + "group": "Web Scraping & Navegação", "icon": "globe", "pages": [ 
"pt-BR/tools/web-scraping/overview", @@ -5457,7 +6386,7 @@ ] }, { - "group": "Automa\u00e7\u00e3o", + "group": "Automação", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -5532,7 +6461,7 @@ "icon": "briefcase", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -5564,7 +6493,7 @@ ] }, { - "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", + "group": "Documentação de Integração", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -5640,11 +6569,11 @@ ] }, { - "tab": "Refer\u00eancia da API", + "tab": "Referência da API", "icon": "magnifying-glass", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -5669,11 +6598,11 @@ ] }, { - "tab": "Notas de Vers\u00e3o", + "tab": "Notas de Versão", "icon": "clock", "groups": [ { - "group": "Notas de Vers\u00e3o", + "group": "Notas de Versão", "pages": [ "pt-BR/changelog" ] @@ -5686,7 +6615,7 @@ "version": "v1.11.1", "tabs": [ { - "tab": "In\u00edcio", + "tab": "Início", "icon": "house", "groups": [ { @@ -5698,11 +6627,11 @@ ] }, { - "tab": "Documenta\u00e7\u00e3o", + "tab": "Documentação", "icon": "book-open", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -5713,7 +6642,7 @@ "group": "Guias", "pages": [ { - "group": "Estrat\u00e9gia", + "group": "Estratégia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -5749,14 +6678,14 @@ ] }, { - "group": "Ferramentas de Codifica\u00e7\u00e3o", + "group": "Ferramentas de Codificação", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avan\u00e7ado", + "group": "Avançado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -5764,7 +6693,7 @@ ] }, { - "group": "Migra\u00e7\u00e3o", + "group": "Migração", 
"icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -5798,7 +6727,7 @@ ] }, { - "group": "Integra\u00e7\u00e3o MCP", + "group": "Integração MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -5832,7 +6761,7 @@ ] }, { - "group": "Web Scraping & Navega\u00e7\u00e3o", + "group": "Web Scraping & Navegação", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -5913,7 +6842,7 @@ ] }, { - "group": "Automa\u00e7\u00e3o", + "group": "Automação", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -5988,7 +6917,7 @@ "icon": "briefcase", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -6020,7 +6949,7 @@ ] }, { - "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", + "group": "Documentação de Integração", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -6096,11 +7025,11 @@ ] }, { - "tab": "Refer\u00eancia da API", + "tab": "Referência da API", "icon": "magnifying-glass", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -6125,11 +7054,11 @@ ] }, { - "tab": "Notas de Vers\u00e3o", + "tab": "Notas de Versão", "icon": "clock", "groups": [ { - "group": "Notas de Vers\u00e3o", + "group": "Notas de Versão", "pages": [ "pt-BR/changelog" ] @@ -6142,7 +7071,7 @@ "version": "v1.11.0", "tabs": [ { - "tab": "In\u00edcio", + "tab": "Início", "icon": "house", "groups": [ { @@ -6154,11 +7083,11 @@ ] }, { - "tab": "Documenta\u00e7\u00e3o", + "tab": "Documentação", "icon": "book-open", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -6169,7 +7098,7 @@ "group": "Guias", "pages": [ { - "group": "Estrat\u00e9gia", + "group": "Estratégia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -6205,14 +7134,14 
@@ ] }, { - "group": "Ferramentas de Codifica\u00e7\u00e3o", + "group": "Ferramentas de Codificação", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avan\u00e7ado", + "group": "Avançado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -6220,7 +7149,7 @@ ] }, { - "group": "Migra\u00e7\u00e3o", + "group": "Migração", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -6253,7 +7182,7 @@ ] }, { - "group": "Integra\u00e7\u00e3o MCP", + "group": "Integração MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -6287,7 +7216,7 @@ ] }, { - "group": "Web Scraping & Navega\u00e7\u00e3o", + "group": "Web Scraping & Navegação", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -6368,7 +7297,7 @@ ] }, { - "group": "Automa\u00e7\u00e3o", + "group": "Automação", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -6443,7 +7372,7 @@ "icon": "briefcase", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -6475,7 +7404,7 @@ ] }, { - "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", + "group": "Documentação de Integração", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -6551,11 +7480,11 @@ ] }, { - "tab": "Refer\u00eancia da API", + "tab": "Referência da API", "icon": "magnifying-glass", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -6580,11 +7509,11 @@ ] }, { - "tab": "Notas de Vers\u00e3o", + "tab": "Notas de Versão", "icon": "clock", "groups": [ { - "group": "Notas de Vers\u00e3o", + "group": "Notas de Versão", "pages": [ "pt-BR/changelog" ] @@ -6597,7 +7526,7 @@ "version": "v1.10.1", "tabs": [ { - "tab": "In\u00edcio", + "tab": "Início", "icon": "house", "groups": [ { @@ -6609,11 +7538,11 @@ ] }, { - "tab": 
"Documenta\u00e7\u00e3o", + "tab": "Documentação", "icon": "book-open", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -6624,7 +7553,7 @@ "group": "Guias", "pages": [ { - "group": "Estrat\u00e9gia", + "group": "Estratégia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -6660,14 +7589,14 @@ ] }, { - "group": "Ferramentas de Codifica\u00e7\u00e3o", + "group": "Ferramentas de Codificação", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avan\u00e7ado", + "group": "Avançado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -6675,7 +7604,7 @@ ] }, { - "group": "Migra\u00e7\u00e3o", + "group": "Migração", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -6708,7 +7637,7 @@ ] }, { - "group": "Integra\u00e7\u00e3o MCP", + "group": "Integração MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -6742,7 +7671,7 @@ ] }, { - "group": "Web Scraping & Navega\u00e7\u00e3o", + "group": "Web Scraping & Navegação", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -6823,7 +7752,7 @@ ] }, { - "group": "Automa\u00e7\u00e3o", + "group": "Automação", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -6898,7 +7827,7 @@ "icon": "briefcase", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -6930,7 +7859,7 @@ ] }, { - "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", + "group": "Documentação de Integração", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -7006,11 +7935,11 @@ ] }, { - "tab": "Refer\u00eancia da API", + "tab": "Referência da API", "icon": "magnifying-glass", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ 
-7035,11 +7964,11 @@ ] }, { - "tab": "Notas de Vers\u00e3o", + "tab": "Notas de Versão", "icon": "clock", "groups": [ { - "group": "Notas de Vers\u00e3o", + "group": "Notas de Versão", "pages": [ "pt-BR/changelog" ] @@ -7052,7 +7981,7 @@ "version": "v1.10.0", "tabs": [ { - "tab": "In\u00edcio", + "tab": "Início", "icon": "house", "groups": [ { @@ -7064,11 +7993,11 @@ ] }, { - "tab": "Documenta\u00e7\u00e3o", + "tab": "Documentação", "icon": "book-open", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/introduction", "pt-BR/installation", @@ -7079,7 +8008,7 @@ "group": "Guias", "pages": [ { - "group": "Estrat\u00e9gia", + "group": "Estratégia", "icon": "compass", "pages": [ "pt-BR/guides/concepts/evaluating-use-cases" @@ -7115,14 +8044,14 @@ ] }, { - "group": "Ferramentas de Codifica\u00e7\u00e3o", + "group": "Ferramentas de Codificação", "icon": "terminal", "pages": [ "pt-BR/guides/coding-tools/agents-md" ] }, { - "group": "Avan\u00e7ado", + "group": "Avançado", "icon": "gear", "pages": [ "pt-BR/guides/advanced/customizing-prompts", @@ -7130,7 +8059,7 @@ ] }, { - "group": "Migra\u00e7\u00e3o", + "group": "Migração", "icon": "shuffle", "pages": [ "pt-BR/guides/migration/migrating-from-langgraph" @@ -7164,7 +8093,7 @@ ] }, { - "group": "Integra\u00e7\u00e3o MCP", + "group": "Integração MCP", "pages": [ "pt-BR/mcp/overview", "pt-BR/mcp/dsl-integration", @@ -7198,7 +8127,7 @@ ] }, { - "group": "Web Scraping & Navega\u00e7\u00e3o", + "group": "Web Scraping & Navegação", "icon": "globe", "pages": [ "pt-BR/tools/web-scraping/overview", @@ -7279,7 +8208,7 @@ ] }, { - "group": "Automa\u00e7\u00e3o", + "group": "Automação", "icon": "bolt", "pages": [ "pt-BR/tools/automation/overview", @@ -7354,7 +8283,7 @@ "icon": "briefcase", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/enterprise/introduction" ] @@ -7386,7 +8315,7 @@ ] }, { - "group": "Documenta\u00e7\u00e3o de Integra\u00e7\u00e3o", + "group": 
"Documentação de Integração", "pages": [ "pt-BR/enterprise/integrations/asana", "pt-BR/enterprise/integrations/box", @@ -7462,11 +8391,11 @@ ] }, { - "tab": "Refer\u00eancia da API", + "tab": "Referência da API", "icon": "magnifying-glass", "groups": [ { - "group": "Come\u00e7ando", + "group": "Começando", "pages": [ "pt-BR/api-reference/introduction", "pt-BR/api-reference/inputs", @@ -7491,11 +8420,11 @@ ] }, { - "tab": "Notas de Vers\u00e3o", + "tab": "Notas de Versão", "icon": "clock", "groups": [ { - "group": "Notas de Vers\u00e3o", + "group": "Notas de Versão", "pages": [ "pt-BR/changelog" ] @@ -7511,17 +8440,17 @@ "global": { "anchors": [ { - "anchor": "\uc6f9\uc0ac\uc774\ud2b8", + "anchor": "웹사이트", "href": "https://crewai.com", "icon": "globe" }, { - "anchor": "\ud3ec\ub7fc", + "anchor": "포럼", "href": "https://community.crewai.com", "icon": "discourse" }, { - "anchor": "\ube14\ub85c\uadf8", + "anchor": "블로그", "href": "https://blog.crewai.com", "icon": "newspaper" }, @@ -7534,15 +8463,15 @@ }, "versions": [ { - "version": "v1.13.0", + "version": "v1.14.0", "default": true, "tabs": [ { - "tab": "\ud648", + "tab": "홈", "icon": "house", "groups": [ { - "group": "\ud658\uc601\ud569\ub2c8\ub2e4", + "group": "환영합니다", "pages": [ "ko/index" ] @@ -7550,11 +8479,11 @@ ] }, { - "tab": "\uae30\uc220 \ubb38\uc11c", + "tab": "기술 문서", "icon": "book-open", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/introduction", "ko/installation", @@ -7562,31 +8491,31 @@ ] }, { - "group": "\uac00\uc774\ub4dc", + "group": "가이드", "pages": [ { - "group": "\uc804\ub7b5", + "group": "전략", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", + "group": "에이전트 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "\ud06c\ub8e8 (Crews)", + "group": "크루 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": 
"\ud50c\ub85c\uc6b0 (Flows)", + "group": "플로우 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -7594,21 +8523,21 @@ ] }, { - "group": "\ub3c4\uad6c", + "group": "도구", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "\ucf54\ub529 \ub3c4\uad6c", + "group": "코딩 도구", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "\uace0\uae09", + "group": "고급", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -7616,7 +8545,7 @@ ] }, { - "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", + "group": "마이그레이션", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -7625,7 +8554,7 @@ ] }, { - "group": "\ud575\uc2ec \uac1c\ub150", + "group": "핵심 개념", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -7651,7 +8580,7 @@ ] }, { - "group": "MCP \ud1b5\ud569", + "group": "MCP 통합", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -7663,11 +8592,11 @@ ] }, { - "group": "\ub3c4\uad6c (Tools)", + "group": "도구 (Tools)", "pages": [ "ko/tools/overview", { - "group": "\ud30c\uc77c & \ubb38\uc11c", + "group": "파일 & 문서", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -7687,7 +8616,7 @@ ] }, { - "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", + "group": "웹 스크래핑 & 브라우징", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -7707,7 +8636,7 @@ ] }, { - "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", + "group": "검색 및 연구", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -7729,7 +8658,7 @@ ] }, { - "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", + "group": "데이터베이스 & 데이터", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -7744,7 +8673,7 @@ ] }, { - "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", + "group": "인공지능 & 머신러닝", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -7758,7 +8687,7 @@ ] }, { - 
"group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", + "group": "클라우드 & 스토리지", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -7777,7 +8706,7 @@ ] }, { - "group": "\uc790\ub3d9\ud654", + "group": "자동화", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -7812,7 +8741,7 @@ ] }, { - "group": "\ud559\uc2b5", + "group": "학습", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -7849,17 +8778,17 @@ ] }, { - "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", + "tab": "엔터프라이즈", "icon": "briefcase", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "\ube4c\ub4dc", + "group": "빌드", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -7870,7 +8799,7 @@ ] }, { - "group": "\uc6b4\uc601", + "group": "운영", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -7879,13 +8808,13 @@ ] }, { - "group": "\uad00\ub9ac", + "group": "관리", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "\ud1b5\ud569 \ubb38\uc11c", + "group": "통합 문서", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -7937,7 +8866,7 @@ ] }, { - "group": "\ud2b8\ub9ac\uac70", + "group": "트리거", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -7953,7 +8882,7 @@ ] }, { - "group": "\ud559\uc2b5 \uc790\uc6d0", + "group": "학습 자원", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -7961,11 +8890,11 @@ ] }, { - "tab": "API \ub808\ud37c\ub7f0\uc2a4", + "tab": "API 레퍼런스", "icon": "magnifying-glass", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -7977,11 +8906,11 @@ ] }, { - "tab": "\uc608\uc2dc", + "tab": "예시", "icon": "code", "groups": [ { - "group": "\uc608\uc2dc", + "group": "예시", "pages": [ "ko/examples/example", 
"ko/examples/cookbooks" @@ -7990,11 +8919,480 @@ ] }, { - "tab": "\ubcc0\uacbd \ub85c\uadf8", + "tab": "변경 로그", "icon": "clock", "groups": [ { - "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", + "group": "릴리스 노트", + "pages": [ + "ko/changelog" + ] + } + ] + } + ] + }, + { + "version": "v1.13.0", + "tabs": [ + { + "tab": "홈", + "icon": "house", + "groups": [ + { + "group": "환영합니다", + "pages": [ + "ko/index" + ] + } + ] + }, + { + "tab": "기술 문서", + "icon": "book-open", + "groups": [ + { + "group": "시작 안내", + "pages": [ + "ko/introduction", + "ko/installation", + "ko/quickstart" + ] + }, + { + "group": "가이드", + "pages": [ + { + "group": "전략", + "icon": "compass", + "pages": [ + "ko/guides/concepts/evaluating-use-cases" + ] + }, + { + "group": "에이전트 (Agents)", + "icon": "user", + "pages": [ + "ko/guides/agents/crafting-effective-agents" + ] + }, + { + "group": "크루 (Crews)", + "icon": "users", + "pages": [ + "ko/guides/crews/first-crew" + ] + }, + { + "group": "플로우 (Flows)", + "icon": "code-branch", + "pages": [ + "ko/guides/flows/first-flow", + "ko/guides/flows/mastering-flow-state" + ] + }, + { + "group": "도구", + "icon": "wrench", + "pages": [ + "ko/guides/tools/publish-custom-tools" + ] + }, + { + "group": "코딩 도구", + "icon": "terminal", + "pages": [ + "ko/guides/coding-tools/agents-md" + ] + }, + { + "group": "고급", + "icon": "gear", + "pages": [ + "ko/guides/advanced/customizing-prompts", + "ko/guides/advanced/fingerprinting" + ] + }, + { + "group": "마이그레이션", + "icon": "shuffle", + "pages": [ + "ko/guides/migration/migrating-from-langgraph" + ] + } + ] + }, + { + "group": "핵심 개념", + "pages": [ + "ko/concepts/agents", + "ko/concepts/tasks", + "ko/concepts/agent-capabilities", + "ko/concepts/crews", + "ko/concepts/flows", + "ko/concepts/production-architecture", + "ko/concepts/knowledge", + "ko/concepts/skills", + "ko/concepts/llms", + "ko/concepts/files", + "ko/concepts/processes", + "ko/concepts/collaboration", + "ko/concepts/training", + "ko/concepts/memory", + 
"ko/concepts/reasoning", + "ko/concepts/planning", + "ko/concepts/testing", + "ko/concepts/cli", + "ko/concepts/tools", + "ko/concepts/event-listener", + "ko/concepts/checkpointing" + ] + }, + { + "group": "MCP 통합", + "pages": [ + "ko/mcp/overview", + "ko/mcp/dsl-integration", + "ko/mcp/stdio", + "ko/mcp/sse", + "ko/mcp/streamable-http", + "ko/mcp/multiple-servers", + "ko/mcp/security" + ] + }, + { + "group": "도구 (Tools)", + "pages": [ + "ko/tools/overview", + { + "group": "파일 & 문서", + "icon": "folder-open", + "pages": [ + "ko/tools/file-document/overview", + "ko/tools/file-document/filereadtool", + "ko/tools/file-document/filewritetool", + "ko/tools/file-document/pdfsearchtool", + "ko/tools/file-document/docxsearchtool", + "ko/tools/file-document/mdxsearchtool", + "ko/tools/file-document/xmlsearchtool", + "ko/tools/file-document/txtsearchtool", + "ko/tools/file-document/jsonsearchtool", + "ko/tools/file-document/csvsearchtool", + "ko/tools/file-document/directorysearchtool", + "ko/tools/file-document/directoryreadtool", + "ko/tools/file-document/ocrtool", + "ko/tools/file-document/pdf-text-writing-tool" + ] + }, + { + "group": "웹 스크래핑 & 브라우징", + "icon": "globe", + "pages": [ + "ko/tools/web-scraping/overview", + "ko/tools/web-scraping/scrapewebsitetool", + "ko/tools/web-scraping/scrapeelementfromwebsitetool", + "ko/tools/web-scraping/scrapflyscrapetool", + "ko/tools/web-scraping/seleniumscrapingtool", + "ko/tools/web-scraping/scrapegraphscrapetool", + "ko/tools/web-scraping/spidertool", + "ko/tools/web-scraping/browserbaseloadtool", + "ko/tools/web-scraping/hyperbrowserloadtool", + "ko/tools/web-scraping/stagehandtool", + "ko/tools/web-scraping/firecrawlcrawlwebsitetool", + "ko/tools/web-scraping/firecrawlscrapewebsitetool", + "ko/tools/web-scraping/oxylabsscraperstool", + "ko/tools/web-scraping/brightdata-tools" + ] + }, + { + "group": "검색 및 연구", + "icon": "magnifying-glass", + "pages": [ + "ko/tools/search-research/overview", + 
"ko/tools/search-research/serperdevtool", + "ko/tools/search-research/bravesearchtool", + "ko/tools/search-research/exasearchtool", + "ko/tools/search-research/linkupsearchtool", + "ko/tools/search-research/githubsearchtool", + "ko/tools/search-research/websitesearchtool", + "ko/tools/search-research/codedocssearchtool", + "ko/tools/search-research/youtubechannelsearchtool", + "ko/tools/search-research/youtubevideosearchtool", + "ko/tools/search-research/tavilysearchtool", + "ko/tools/search-research/tavilyextractortool", + "ko/tools/search-research/arxivpapertool", + "ko/tools/search-research/serpapi-googlesearchtool", + "ko/tools/search-research/serpapi-googleshoppingtool", + "ko/tools/search-research/databricks-query-tool" + ] + }, + { + "group": "데이터베이스 & 데이터", + "icon": "database", + "pages": [ + "ko/tools/database-data/overview", + "ko/tools/database-data/mysqltool", + "ko/tools/database-data/pgsearchtool", + "ko/tools/database-data/snowflakesearchtool", + "ko/tools/database-data/nl2sqltool", + "ko/tools/database-data/qdrantvectorsearchtool", + "ko/tools/database-data/weaviatevectorsearchtool", + "ko/tools/database-data/mongodbvectorsearchtool", + "ko/tools/database-data/singlestoresearchtool" + ] + }, + { + "group": "인공지능 & 머신러닝", + "icon": "brain", + "pages": [ + "ko/tools/ai-ml/overview", + "ko/tools/ai-ml/dalletool", + "ko/tools/ai-ml/visiontool", + "ko/tools/ai-ml/aimindtool", + "ko/tools/ai-ml/llamaindextool", + "ko/tools/ai-ml/langchaintool", + "ko/tools/ai-ml/ragtool", + "ko/tools/ai-ml/codeinterpretertool" + ] + }, + { + "group": "클라우드 & 스토리지", + "icon": "cloud", + "pages": [ + "ko/tools/cloud-storage/overview", + "ko/tools/cloud-storage/s3readertool", + "ko/tools/cloud-storage/s3writertool", + "ko/tools/cloud-storage/bedrockkbretriever" + ] + }, + { + "group": "Integrations", + "icon": "plug", + "pages": [ + "ko/tools/integration/overview", + "ko/tools/integration/bedrockinvokeagenttool", + "ko/tools/integration/crewaiautomationtool" + ] + }, + { + 
"group": "자동화", + "icon": "bolt", + "pages": [ + "ko/tools/automation/overview", + "ko/tools/automation/apifyactorstool", + "ko/tools/automation/composiotool", + "ko/tools/automation/multiontool", + "ko/tools/automation/zapieractionstool" + ] + } + ] + }, + { + "group": "Observability", + "pages": [ + "ko/observability/tracing", + "ko/observability/overview", + "ko/observability/arize-phoenix", + "ko/observability/braintrust", + "ko/observability/datadog", + "ko/observability/galileo", + "ko/observability/langdb", + "ko/observability/langfuse", + "ko/observability/langtrace", + "ko/observability/maxim", + "ko/observability/mlflow", + "ko/observability/neatlogs", + "ko/observability/openlit", + "ko/observability/opik", + "ko/observability/patronus-evaluation", + "ko/observability/portkey", + "ko/observability/weave" + ] + }, + { + "group": "학습", + "pages": [ + "ko/learn/overview", + "ko/learn/llm-selection-guide", + "ko/learn/conditional-tasks", + "ko/learn/coding-agents", + "ko/learn/create-custom-tools", + "ko/learn/custom-llm", + "ko/learn/custom-manager-agent", + "ko/learn/customizing-agents", + "ko/learn/dalle-image-generation", + "ko/learn/force-tool-output-as-result", + "ko/learn/hierarchical-process", + "ko/learn/human-input-on-execution", + "ko/learn/human-in-the-loop", + "ko/learn/human-feedback-in-flows", + "ko/learn/kickoff-async", + "ko/learn/kickoff-for-each", + "ko/learn/llm-connections", + "ko/learn/multimodal-agents", + "ko/learn/replay-tasks-from-latest-crew-kickoff", + "ko/learn/sequential-process", + "ko/learn/using-annotations", + "ko/learn/execution-hooks", + "ko/learn/llm-hooks", + "ko/learn/tool-hooks" + ] + }, + { + "group": "Telemetry", + "pages": [ + "ko/telemetry" + ] + } + ] + }, + { + "tab": "엔터프라이즈", + "icon": "briefcase", + "groups": [ + { + "group": "시작 안내", + "pages": [ + "ko/enterprise/introduction" + ] + }, + { + "group": "빌드", + "pages": [ + "ko/enterprise/features/automations", + "ko/enterprise/features/crew-studio", + 
"ko/enterprise/features/marketplace", + "ko/enterprise/features/agent-repositories", + "ko/enterprise/features/tools-and-integrations", + "ko/enterprise/features/pii-trace-redactions" + ] + }, + { + "group": "운영", + "pages": [ + "ko/enterprise/features/traces", + "ko/enterprise/features/webhook-streaming", + "ko/enterprise/features/hallucination-guardrail", + "ko/enterprise/features/flow-hitl-management" + ] + }, + { + "group": "관리", + "pages": [ + "ko/enterprise/features/rbac" + ] + }, + { + "group": "통합 문서", + "pages": [ + "ko/enterprise/integrations/asana", + "ko/enterprise/integrations/box", + "ko/enterprise/integrations/clickup", + "ko/enterprise/integrations/github", + "ko/enterprise/integrations/gmail", + "ko/enterprise/integrations/google_calendar", + "ko/enterprise/integrations/google_contacts", + "ko/enterprise/integrations/google_docs", + "ko/enterprise/integrations/google_drive", + "ko/enterprise/integrations/google_sheets", + "ko/enterprise/integrations/google_slides", + "ko/enterprise/integrations/hubspot", + "ko/enterprise/integrations/jira", + "ko/enterprise/integrations/linear", + "ko/enterprise/integrations/microsoft_excel", + "ko/enterprise/integrations/microsoft_onedrive", + "ko/enterprise/integrations/microsoft_outlook", + "ko/enterprise/integrations/microsoft_sharepoint", + "ko/enterprise/integrations/microsoft_teams", + "ko/enterprise/integrations/microsoft_word", + "ko/enterprise/integrations/notion", + "ko/enterprise/integrations/salesforce", + "ko/enterprise/integrations/shopify", + "ko/enterprise/integrations/slack", + "ko/enterprise/integrations/stripe", + "ko/enterprise/integrations/zendesk" + ] + }, + { + "group": "How-To Guides", + "pages": [ + "ko/enterprise/guides/build-crew", + "ko/enterprise/guides/prepare-for-deployment", + "ko/enterprise/guides/deploy-to-amp", + "ko/enterprise/guides/private-package-registry", + "ko/enterprise/guides/kickoff-crew", + "ko/enterprise/guides/training-crews", + "ko/enterprise/guides/update-crew", + 
"ko/enterprise/guides/enable-crew-studio", + "ko/enterprise/guides/capture_telemetry_logs", + "ko/enterprise/guides/azure-openai-setup", + "ko/enterprise/guides/tool-repository", + "ko/enterprise/guides/custom-mcp-server", + "ko/enterprise/guides/react-component-export", + "ko/enterprise/guides/team-management", + "ko/enterprise/guides/human-in-the-loop", + "ko/enterprise/guides/webhook-automation" + ] + }, + { + "group": "트리거", + "pages": [ + "ko/enterprise/guides/automation-triggers", + "ko/enterprise/guides/gmail-trigger", + "ko/enterprise/guides/google-calendar-trigger", + "ko/enterprise/guides/google-drive-trigger", + "ko/enterprise/guides/outlook-trigger", + "ko/enterprise/guides/onedrive-trigger", + "ko/enterprise/guides/microsoft-teams-trigger", + "ko/enterprise/guides/slack-trigger", + "ko/enterprise/guides/hubspot-trigger", + "ko/enterprise/guides/salesforce-trigger", + "ko/enterprise/guides/zapier-trigger" + ] + }, + { + "group": "학습 자원", + "pages": [ + "ko/enterprise/resources/frequently-asked-questions" + ] + } + ] + }, + { + "tab": "API 레퍼런스", + "icon": "magnifying-glass", + "groups": [ + { + "group": "시작 안내", + "pages": [ + "ko/api-reference/introduction", + "ko/api-reference/inputs", + "ko/api-reference/kickoff", + "ko/api-reference/resume", + "ko/api-reference/status" + ] + } + ] + }, + { + "tab": "예시", + "icon": "code", + "groups": [ + { + "group": "예시", + "pages": [ + "ko/examples/example", + "ko/examples/cookbooks" + ] + } + ] + }, + { + "tab": "변경 로그", + "icon": "clock", + "groups": [ + { + "group": "릴리스 노트", "pages": [ "ko/changelog" ] @@ -8007,11 +9405,11 @@ "version": "v1.12.2", "tabs": [ { - "tab": "\ud648", + "tab": "홈", "icon": "house", "groups": [ { - "group": "\ud658\uc601\ud569\ub2c8\ub2e4", + "group": "환영합니다", "pages": [ "ko/index" ] @@ -8019,11 +9417,11 @@ ] }, { - "tab": "\uae30\uc220 \ubb38\uc11c", + "tab": "기술 문서", "icon": "book-open", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ 
"ko/introduction", "ko/installation", @@ -8031,31 +9429,31 @@ ] }, { - "group": "\uac00\uc774\ub4dc", + "group": "가이드", "pages": [ { - "group": "\uc804\ub7b5", + "group": "전략", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", + "group": "에이전트 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "\ud06c\ub8e8 (Crews)", + "group": "크루 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "\ud50c\ub85c\uc6b0 (Flows)", + "group": "플로우 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -8063,21 +9461,21 @@ ] }, { - "group": "\ub3c4\uad6c", + "group": "도구", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "\ucf54\ub529 \ub3c4\uad6c", + "group": "코딩 도구", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "\uace0\uae09", + "group": "고급", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -8085,7 +9483,7 @@ ] }, { - "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", + "group": "마이그레이션", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -8094,7 +9492,7 @@ ] }, { - "group": "\ud575\uc2ec \uac1c\ub150", + "group": "핵심 개념", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -8120,7 +9518,7 @@ ] }, { - "group": "MCP \ud1b5\ud569", + "group": "MCP 통합", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -8132,11 +9530,11 @@ ] }, { - "group": "\ub3c4\uad6c (Tools)", + "group": "도구 (Tools)", "pages": [ "ko/tools/overview", { - "group": "\ud30c\uc77c & \ubb38\uc11c", + "group": "파일 & 문서", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -8156,7 +9554,7 @@ ] }, { - "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", + "group": "웹 스크래핑 & 브라우징", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -8176,7 +9574,7 @@ ] }, { 
- "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", + "group": "검색 및 연구", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -8198,7 +9596,7 @@ ] }, { - "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", + "group": "데이터베이스 & 데이터", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -8213,7 +9611,7 @@ ] }, { - "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", + "group": "인공지능 & 머신러닝", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -8227,7 +9625,7 @@ ] }, { - "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", + "group": "클라우드 & 스토리지", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -8246,7 +9644,7 @@ ] }, { - "group": "\uc790\ub3d9\ud654", + "group": "자동화", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -8281,7 +9679,7 @@ ] }, { - "group": "\ud559\uc2b5", + "group": "학습", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -8318,17 +9716,17 @@ ] }, { - "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", + "tab": "엔터프라이즈", "icon": "briefcase", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "\ube4c\ub4dc", + "group": "빌드", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -8339,7 +9737,7 @@ ] }, { - "group": "\uc6b4\uc601", + "group": "운영", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -8348,13 +9746,13 @@ ] }, { - "group": "\uad00\ub9ac", + "group": "관리", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "\ud1b5\ud569 \ubb38\uc11c", + "group": "통합 문서", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -8406,7 +9804,7 @@ ] }, { - "group": "\ud2b8\ub9ac\uac70", + "group": "트리거", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -8422,7 +9820,7 @@ ] }, { - "group": "\ud559\uc2b5 
\uc790\uc6d0", + "group": "학습 자원", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -8430,11 +9828,11 @@ ] }, { - "tab": "API \ub808\ud37c\ub7f0\uc2a4", + "tab": "API 레퍼런스", "icon": "magnifying-glass", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -8446,11 +9844,11 @@ ] }, { - "tab": "\uc608\uc2dc", + "tab": "예시", "icon": "code", "groups": [ { - "group": "\uc608\uc2dc", + "group": "예시", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -8459,11 +9857,11 @@ ] }, { - "tab": "\ubcc0\uacbd \ub85c\uadf8", + "tab": "변경 로그", "icon": "clock", "groups": [ { - "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", + "group": "릴리스 노트", "pages": [ "ko/changelog" ] @@ -8476,11 +9874,11 @@ "version": "v1.12.1", "tabs": [ { - "tab": "\ud648", + "tab": "홈", "icon": "house", "groups": [ { - "group": "\ud658\uc601\ud569\ub2c8\ub2e4", + "group": "환영합니다", "pages": [ "ko/index" ] @@ -8488,11 +9886,11 @@ ] }, { - "tab": "\uae30\uc220 \ubb38\uc11c", + "tab": "기술 문서", "icon": "book-open", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/introduction", "ko/installation", @@ -8500,31 +9898,31 @@ ] }, { - "group": "\uac00\uc774\ub4dc", + "group": "가이드", "pages": [ { - "group": "\uc804\ub7b5", + "group": "전략", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", + "group": "에이전트 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "\ud06c\ub8e8 (Crews)", + "group": "크루 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "\ud50c\ub85c\uc6b0 (Flows)", + "group": "플로우 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -8532,21 +9930,21 @@ ] }, { - "group": "\ub3c4\uad6c", + "group": "도구", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] 
}, { - "group": "\ucf54\ub529 \ub3c4\uad6c", + "group": "코딩 도구", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "\uace0\uae09", + "group": "고급", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -8554,7 +9952,7 @@ ] }, { - "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", + "group": "마이그레이션", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -8563,7 +9961,7 @@ ] }, { - "group": "\ud575\uc2ec \uac1c\ub150", + "group": "핵심 개념", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -8588,7 +9986,7 @@ ] }, { - "group": "MCP \ud1b5\ud569", + "group": "MCP 통합", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -8600,11 +9998,11 @@ ] }, { - "group": "\ub3c4\uad6c (Tools)", + "group": "도구 (Tools)", "pages": [ "ko/tools/overview", { - "group": "\ud30c\uc77c & \ubb38\uc11c", + "group": "파일 & 문서", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -8624,7 +10022,7 @@ ] }, { - "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", + "group": "웹 스크래핑 & 브라우징", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -8644,7 +10042,7 @@ ] }, { - "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", + "group": "검색 및 연구", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -8666,7 +10064,7 @@ ] }, { - "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", + "group": "데이터베이스 & 데이터", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -8681,7 +10079,7 @@ ] }, { - "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", + "group": "인공지능 & 머신러닝", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -8695,7 +10093,7 @@ ] }, { - "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", + "group": "클라우드 & 스토리지", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -8714,7 +10112,7 @@ ] }, { - "group": "\uc790\ub3d9\ud654", + "group": "자동화", "icon": "bolt", "pages": [ 
"ko/tools/automation/overview", @@ -8749,7 +10147,7 @@ ] }, { - "group": "\ud559\uc2b5", + "group": "학습", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -8786,17 +10184,17 @@ ] }, { - "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", + "tab": "엔터프라이즈", "icon": "briefcase", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "\ube4c\ub4dc", + "group": "빌드", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -8807,7 +10205,7 @@ ] }, { - "group": "\uc6b4\uc601", + "group": "운영", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -8816,13 +10214,13 @@ ] }, { - "group": "\uad00\ub9ac", + "group": "관리", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "\ud1b5\ud569 \ubb38\uc11c", + "group": "통합 문서", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -8874,7 +10272,7 @@ ] }, { - "group": "\ud2b8\ub9ac\uac70", + "group": "트리거", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -8890,7 +10288,7 @@ ] }, { - "group": "\ud559\uc2b5 \uc790\uc6d0", + "group": "학습 자원", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -8898,11 +10296,11 @@ ] }, { - "tab": "API \ub808\ud37c\ub7f0\uc2a4", + "tab": "API 레퍼런스", "icon": "magnifying-glass", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -8914,11 +10312,11 @@ ] }, { - "tab": "\uc608\uc2dc", + "tab": "예시", "icon": "code", "groups": [ { - "group": "\uc608\uc2dc", + "group": "예시", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -8927,11 +10325,11 @@ ] }, { - "tab": "\ubcc0\uacbd \ub85c\uadf8", + "tab": "변경 로그", "icon": "clock", "groups": [ { - "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", + "group": "릴리스 노트", "pages": [ "ko/changelog" ] @@ -8944,11 
+10342,11 @@ "version": "v1.12.0", "tabs": [ { - "tab": "\ud648", + "tab": "홈", "icon": "house", "groups": [ { - "group": "\ud658\uc601\ud569\ub2c8\ub2e4", + "group": "환영합니다", "pages": [ "ko/index" ] @@ -8956,11 +10354,11 @@ ] }, { - "tab": "\uae30\uc220 \ubb38\uc11c", + "tab": "기술 문서", "icon": "book-open", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/introduction", "ko/installation", @@ -8968,31 +10366,31 @@ ] }, { - "group": "\uac00\uc774\ub4dc", + "group": "가이드", "pages": [ { - "group": "\uc804\ub7b5", + "group": "전략", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", + "group": "에이전트 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "\ud06c\ub8e8 (Crews)", + "group": "크루 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "\ud50c\ub85c\uc6b0 (Flows)", + "group": "플로우 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -9000,21 +10398,21 @@ ] }, { - "group": "\ub3c4\uad6c", + "group": "도구", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "\ucf54\ub529 \ub3c4\uad6c", + "group": "코딩 도구", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "\uace0\uae09", + "group": "고급", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -9022,7 +10420,7 @@ ] }, { - "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", + "group": "마이그레이션", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -9031,7 +10429,7 @@ ] }, { - "group": "\ud575\uc2ec \uac1c\ub150", + "group": "핵심 개념", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -9056,7 +10454,7 @@ ] }, { - "group": "MCP \ud1b5\ud569", + "group": "MCP 통합", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -9068,11 +10466,11 @@ ] }, { - "group": "\ub3c4\uad6c (Tools)", + "group": 
"도구 (Tools)", "pages": [ "ko/tools/overview", { - "group": "\ud30c\uc77c & \ubb38\uc11c", + "group": "파일 & 문서", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -9092,7 +10490,7 @@ ] }, { - "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", + "group": "웹 스크래핑 & 브라우징", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -9112,7 +10510,7 @@ ] }, { - "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", + "group": "검색 및 연구", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -9134,7 +10532,7 @@ ] }, { - "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", + "group": "데이터베이스 & 데이터", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -9149,7 +10547,7 @@ ] }, { - "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", + "group": "인공지능 & 머신러닝", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -9163,7 +10561,7 @@ ] }, { - "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", + "group": "클라우드 & 스토리지", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -9182,7 +10580,7 @@ ] }, { - "group": "\uc790\ub3d9\ud654", + "group": "자동화", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -9217,7 +10615,7 @@ ] }, { - "group": "\ud559\uc2b5", + "group": "학습", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -9254,17 +10652,17 @@ ] }, { - "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", + "tab": "엔터프라이즈", "icon": "briefcase", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "\ube4c\ub4dc", + "group": "빌드", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -9275,7 +10673,7 @@ ] }, { - "group": "\uc6b4\uc601", + "group": "운영", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -9284,13 +10682,13 @@ ] }, { - "group": "\uad00\ub9ac", + "group": "관리", 
"pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "\ud1b5\ud569 \ubb38\uc11c", + "group": "통합 문서", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -9342,7 +10740,7 @@ ] }, { - "group": "\ud2b8\ub9ac\uac70", + "group": "트리거", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -9358,7 +10756,7 @@ ] }, { - "group": "\ud559\uc2b5 \uc790\uc6d0", + "group": "학습 자원", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -9366,11 +10764,11 @@ ] }, { - "tab": "API \ub808\ud37c\ub7f0\uc2a4", + "tab": "API 레퍼런스", "icon": "magnifying-glass", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -9382,11 +10780,11 @@ ] }, { - "tab": "\uc608\uc2dc", + "tab": "예시", "icon": "code", "groups": [ { - "group": "\uc608\uc2dc", + "group": "예시", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -9395,11 +10793,11 @@ ] }, { - "tab": "\ubcc0\uacbd \ub85c\uadf8", + "tab": "변경 로그", "icon": "clock", "groups": [ { - "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", + "group": "릴리스 노트", "pages": [ "ko/changelog" ] @@ -9412,11 +10810,11 @@ "version": "v1.11.1", "tabs": [ { - "tab": "\ud648", + "tab": "홈", "icon": "house", "groups": [ { - "group": "\ud658\uc601\ud569\ub2c8\ub2e4", + "group": "환영합니다", "pages": [ "ko/index" ] @@ -9424,11 +10822,11 @@ ] }, { - "tab": "\uae30\uc220 \ubb38\uc11c", + "tab": "기술 문서", "icon": "book-open", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/introduction", "ko/installation", @@ -9436,31 +10834,31 @@ ] }, { - "group": "\uac00\uc774\ub4dc", + "group": "가이드", "pages": [ { - "group": "\uc804\ub7b5", + "group": "전략", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", + "group": "에이전트 (Agents)", "icon": "user", "pages": [ 
"ko/guides/agents/crafting-effective-agents" ] }, { - "group": "\ud06c\ub8e8 (Crews)", + "group": "크루 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "\ud50c\ub85c\uc6b0 (Flows)", + "group": "플로우 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -9468,21 +10866,21 @@ ] }, { - "group": "\ub3c4\uad6c", + "group": "도구", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "\ucf54\ub529 \ub3c4\uad6c", + "group": "코딩 도구", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "\uace0\uae09", + "group": "고급", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -9490,7 +10888,7 @@ ] }, { - "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", + "group": "마이그레이션", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -9499,7 +10897,7 @@ ] }, { - "group": "\ud575\uc2ec \uac1c\ub150", + "group": "핵심 개념", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -9524,7 +10922,7 @@ ] }, { - "group": "MCP \ud1b5\ud569", + "group": "MCP 통합", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -9536,11 +10934,11 @@ ] }, { - "group": "\ub3c4\uad6c (Tools)", + "group": "도구 (Tools)", "pages": [ "ko/tools/overview", { - "group": "\ud30c\uc77c & \ubb38\uc11c", + "group": "파일 & 문서", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -9560,7 +10958,7 @@ ] }, { - "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", + "group": "웹 스크래핑 & 브라우징", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -9580,7 +10978,7 @@ ] }, { - "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", + "group": "검색 및 연구", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -9602,7 +11000,7 @@ ] }, { - "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", + "group": "데이터베이스 & 데이터", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -9617,7 
+11015,7 @@ ] }, { - "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", + "group": "인공지능 & 머신러닝", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -9631,7 +11029,7 @@ ] }, { - "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", + "group": "클라우드 & 스토리지", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -9650,7 +11048,7 @@ ] }, { - "group": "\uc790\ub3d9\ud654", + "group": "자동화", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -9685,7 +11083,7 @@ ] }, { - "group": "\ud559\uc2b5", + "group": "학습", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -9722,17 +11120,17 @@ ] }, { - "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", + "tab": "엔터프라이즈", "icon": "briefcase", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "\ube4c\ub4dc", + "group": "빌드", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -9743,7 +11141,7 @@ ] }, { - "group": "\uc6b4\uc601", + "group": "운영", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -9752,13 +11150,13 @@ ] }, { - "group": "\uad00\ub9ac", + "group": "관리", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "\ud1b5\ud569 \ubb38\uc11c", + "group": "통합 문서", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -9810,7 +11208,7 @@ ] }, { - "group": "\ud2b8\ub9ac\uac70", + "group": "트리거", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -9826,7 +11224,7 @@ ] }, { - "group": "\ud559\uc2b5 \uc790\uc6d0", + "group": "학습 자원", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -9834,11 +11232,11 @@ ] }, { - "tab": "API \ub808\ud37c\ub7f0\uc2a4", + "tab": "API 레퍼런스", "icon": "magnifying-glass", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/api-reference/introduction", 
"ko/api-reference/inputs", @@ -9850,11 +11248,11 @@ ] }, { - "tab": "\uc608\uc2dc", + "tab": "예시", "icon": "code", "groups": [ { - "group": "\uc608\uc2dc", + "group": "예시", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -9863,11 +11261,11 @@ ] }, { - "tab": "\ubcc0\uacbd \ub85c\uadf8", + "tab": "변경 로그", "icon": "clock", "groups": [ { - "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", + "group": "릴리스 노트", "pages": [ "ko/changelog" ] @@ -9880,11 +11278,11 @@ "version": "v1.11.0", "tabs": [ { - "tab": "\ud648", + "tab": "홈", "icon": "house", "groups": [ { - "group": "\ud658\uc601\ud569\ub2c8\ub2e4", + "group": "환영합니다", "pages": [ "ko/index" ] @@ -9892,11 +11290,11 @@ ] }, { - "tab": "\uae30\uc220 \ubb38\uc11c", + "tab": "기술 문서", "icon": "book-open", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/introduction", "ko/installation", @@ -9904,31 +11302,31 @@ ] }, { - "group": "\uac00\uc774\ub4dc", + "group": "가이드", "pages": [ { - "group": "\uc804\ub7b5", + "group": "전략", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", + "group": "에이전트 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "\ud06c\ub8e8 (Crews)", + "group": "크루 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "\ud50c\ub85c\uc6b0 (Flows)", + "group": "플로우 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -9936,21 +11334,21 @@ ] }, { - "group": "\ub3c4\uad6c", + "group": "도구", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "\ucf54\ub529 \ub3c4\uad6c", + "group": "코딩 도구", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "\uace0\uae09", + "group": "고급", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -9958,7 +11356,7 @@ ] }, { - "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", + 
"group": "마이그레이션", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -9967,7 +11365,7 @@ ] }, { - "group": "\ud575\uc2ec \uac1c\ub150", + "group": "핵심 개념", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -9991,7 +11389,7 @@ ] }, { - "group": "MCP \ud1b5\ud569", + "group": "MCP 통합", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -10003,11 +11401,11 @@ ] }, { - "group": "\ub3c4\uad6c (Tools)", + "group": "도구 (Tools)", "pages": [ "ko/tools/overview", { - "group": "\ud30c\uc77c & \ubb38\uc11c", + "group": "파일 & 문서", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -10027,7 +11425,7 @@ ] }, { - "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", + "group": "웹 스크래핑 & 브라우징", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -10047,7 +11445,7 @@ ] }, { - "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", + "group": "검색 및 연구", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -10069,7 +11467,7 @@ ] }, { - "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", + "group": "데이터베이스 & 데이터", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -10084,7 +11482,7 @@ ] }, { - "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", + "group": "인공지능 & 머신러닝", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -10098,7 +11496,7 @@ ] }, { - "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", + "group": "클라우드 & 스토리지", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -10117,7 +11515,7 @@ ] }, { - "group": "\uc790\ub3d9\ud654", + "group": "자동화", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -10152,7 +11550,7 @@ ] }, { - "group": "\ud559\uc2b5", + "group": "학습", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -10189,17 +11587,17 @@ ] }, { - "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", + "tab": "엔터프라이즈", "icon": "briefcase", "groups": [ { - "group": 
"\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "\ube4c\ub4dc", + "group": "빌드", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -10210,7 +11608,7 @@ ] }, { - "group": "\uc6b4\uc601", + "group": "운영", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -10219,13 +11617,13 @@ ] }, { - "group": "\uad00\ub9ac", + "group": "관리", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "\ud1b5\ud569 \ubb38\uc11c", + "group": "통합 문서", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -10277,7 +11675,7 @@ ] }, { - "group": "\ud2b8\ub9ac\uac70", + "group": "트리거", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -10293,7 +11691,7 @@ ] }, { - "group": "\ud559\uc2b5 \uc790\uc6d0", + "group": "학습 자원", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -10301,11 +11699,11 @@ ] }, { - "tab": "API \ub808\ud37c\ub7f0\uc2a4", + "tab": "API 레퍼런스", "icon": "magnifying-glass", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -10317,11 +11715,11 @@ ] }, { - "tab": "\uc608\uc2dc", + "tab": "예시", "icon": "code", "groups": [ { - "group": "\uc608\uc2dc", + "group": "예시", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -10330,11 +11728,11 @@ ] }, { - "tab": "\ubcc0\uacbd \ub85c\uadf8", + "tab": "변경 로그", "icon": "clock", "groups": [ { - "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", + "group": "릴리스 노트", "pages": [ "ko/changelog" ] @@ -10347,11 +11745,11 @@ "version": "v1.10.1", "tabs": [ { - "tab": "\ud648", + "tab": "홈", "icon": "house", "groups": [ { - "group": "\ud658\uc601\ud569\ub2c8\ub2e4", + "group": "환영합니다", "pages": [ "ko/index" ] @@ -10359,11 +11757,11 @@ ] }, { - "tab": "\uae30\uc220 \ubb38\uc11c", + "tab": "기술 문서", "icon": "book-open", 
"groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/introduction", "ko/installation", @@ -10371,31 +11769,31 @@ ] }, { - "group": "\uac00\uc774\ub4dc", + "group": "가이드", "pages": [ { - "group": "\uc804\ub7b5", + "group": "전략", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", + "group": "에이전트 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "\ud06c\ub8e8 (Crews)", + "group": "크루 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "\ud50c\ub85c\uc6b0 (Flows)", + "group": "플로우 (Flows)", "icon": "code-branch", "pages": [ "ko/guides/flows/first-flow", @@ -10403,21 +11801,21 @@ ] }, { - "group": "\ub3c4\uad6c", + "group": "도구", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "\ucf54\ub529 \ub3c4\uad6c", + "group": "코딩 도구", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "\uace0\uae09", + "group": "고급", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -10425,7 +11823,7 @@ ] }, { - "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", + "group": "마이그레이션", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -10434,7 +11832,7 @@ ] }, { - "group": "\ud575\uc2ec \uac1c\ub150", + "group": "핵심 개념", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -10458,7 +11856,7 @@ ] }, { - "group": "MCP \ud1b5\ud569", + "group": "MCP 통합", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -10470,11 +11868,11 @@ ] }, { - "group": "\ub3c4\uad6c (Tools)", + "group": "도구 (Tools)", "pages": [ "ko/tools/overview", { - "group": "\ud30c\uc77c & \ubb38\uc11c", + "group": "파일 & 문서", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -10494,7 +11892,7 @@ ] }, { - "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", + "group": "웹 스크래핑 & 
브라우징", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -10514,7 +11912,7 @@ ] }, { - "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", + "group": "검색 및 연구", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -10536,7 +11934,7 @@ ] }, { - "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", + "group": "데이터베이스 & 데이터", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -10551,7 +11949,7 @@ ] }, { - "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", + "group": "인공지능 & 머신러닝", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -10565,7 +11963,7 @@ ] }, { - "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", + "group": "클라우드 & 스토리지", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -10584,7 +11982,7 @@ ] }, { - "group": "\uc790\ub3d9\ud654", + "group": "자동화", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -10619,7 +12017,7 @@ ] }, { - "group": "\ud559\uc2b5", + "group": "학습", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -10656,17 +12054,17 @@ ] }, { - "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", + "tab": "엔터프라이즈", "icon": "briefcase", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "\ube4c\ub4dc", + "group": "빌드", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -10677,7 +12075,7 @@ ] }, { - "group": "\uc6b4\uc601", + "group": "운영", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -10686,13 +12084,13 @@ ] }, { - "group": "\uad00\ub9ac", + "group": "관리", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "\ud1b5\ud569 \ubb38\uc11c", + "group": "통합 문서", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -10744,7 +12142,7 @@ ] }, { - "group": "\ud2b8\ub9ac\uac70", + "group": "트리거", "pages": [ 
"ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -10760,7 +12158,7 @@ ] }, { - "group": "\ud559\uc2b5 \uc790\uc6d0", + "group": "학습 자원", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -10768,11 +12166,11 @@ ] }, { - "tab": "API \ub808\ud37c\ub7f0\uc2a4", + "tab": "API 레퍼런스", "icon": "magnifying-glass", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -10784,11 +12182,11 @@ ] }, { - "tab": "\uc608\uc2dc", + "tab": "예시", "icon": "code", "groups": [ { - "group": "\uc608\uc2dc", + "group": "예시", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -10797,11 +12195,11 @@ ] }, { - "tab": "\ubcc0\uacbd \ub85c\uadf8", + "tab": "변경 로그", "icon": "clock", "groups": [ { - "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", + "group": "릴리스 노트", "pages": [ "ko/changelog" ] @@ -10814,11 +12212,11 @@ "version": "v1.10.0", "tabs": [ { - "tab": "\ud648", + "tab": "홈", "icon": "house", "groups": [ { - "group": "\ud658\uc601\ud569\ub2c8\ub2e4", + "group": "환영합니다", "pages": [ "ko/index" ] @@ -10826,11 +12224,11 @@ ] }, { - "tab": "\uae30\uc220 \ubb38\uc11c", + "tab": "기술 문서", "icon": "book-open", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/introduction", "ko/installation", @@ -10838,31 +12236,31 @@ ] }, { - "group": "\uac00\uc774\ub4dc", + "group": "가이드", "pages": [ { - "group": "\uc804\ub7b5", + "group": "전략", "icon": "compass", "pages": [ "ko/guides/concepts/evaluating-use-cases" ] }, { - "group": "\uc5d0\uc774\uc804\ud2b8 (Agents)", + "group": "에이전트 (Agents)", "icon": "user", "pages": [ "ko/guides/agents/crafting-effective-agents" ] }, { - "group": "\ud06c\ub8e8 (Crews)", + "group": "크루 (Crews)", "icon": "users", "pages": [ "ko/guides/crews/first-crew" ] }, { - "group": "\ud50c\ub85c\uc6b0 (Flows)", + "group": "플로우 (Flows)", "icon": "code-branch", "pages": [ 
"ko/guides/flows/first-flow", @@ -10870,21 +12268,21 @@ ] }, { - "group": "\ub3c4\uad6c", + "group": "도구", "icon": "wrench", "pages": [ "ko/guides/tools/publish-custom-tools" ] }, { - "group": "\ucf54\ub529 \ub3c4\uad6c", + "group": "코딩 도구", "icon": "terminal", "pages": [ "ko/guides/coding-tools/agents-md" ] }, { - "group": "\uace0\uae09", + "group": "고급", "icon": "gear", "pages": [ "ko/guides/advanced/customizing-prompts", @@ -10892,7 +12290,7 @@ ] }, { - "group": "\ub9c8\uc774\uadf8\ub808\uc774\uc158", + "group": "마이그레이션", "icon": "shuffle", "pages": [ "ko/guides/migration/migrating-from-langgraph" @@ -10901,7 +12299,7 @@ ] }, { - "group": "\ud575\uc2ec \uac1c\ub150", + "group": "핵심 개념", "pages": [ "ko/concepts/agents", "ko/concepts/tasks", @@ -10926,7 +12324,7 @@ ] }, { - "group": "MCP \ud1b5\ud569", + "group": "MCP 통합", "pages": [ "ko/mcp/overview", "ko/mcp/dsl-integration", @@ -10938,11 +12336,11 @@ ] }, { - "group": "\ub3c4\uad6c (Tools)", + "group": "도구 (Tools)", "pages": [ "ko/tools/overview", { - "group": "\ud30c\uc77c & \ubb38\uc11c", + "group": "파일 & 문서", "icon": "folder-open", "pages": [ "ko/tools/file-document/overview", @@ -10962,7 +12360,7 @@ ] }, { - "group": "\uc6f9 \uc2a4\ud06c\ub798\ud551 & \ube0c\ub77c\uc6b0\uc9d5", + "group": "웹 스크래핑 & 브라우징", "icon": "globe", "pages": [ "ko/tools/web-scraping/overview", @@ -10982,7 +12380,7 @@ ] }, { - "group": "\uac80\uc0c9 \ubc0f \uc5f0\uad6c", + "group": "검색 및 연구", "icon": "magnifying-glass", "pages": [ "ko/tools/search-research/overview", @@ -11004,7 +12402,7 @@ ] }, { - "group": "\ub370\uc774\ud130\ubca0\uc774\uc2a4 & \ub370\uc774\ud130", + "group": "데이터베이스 & 데이터", "icon": "database", "pages": [ "ko/tools/database-data/overview", @@ -11019,7 +12417,7 @@ ] }, { - "group": "\uc778\uacf5\uc9c0\ub2a5 & \uba38\uc2e0\ub7ec\ub2dd", + "group": "인공지능 & 머신러닝", "icon": "brain", "pages": [ "ko/tools/ai-ml/overview", @@ -11033,7 +12431,7 @@ ] }, { - "group": "\ud074\ub77c\uc6b0\ub4dc & \uc2a4\ud1a0\ub9ac\uc9c0", + 
"group": "클라우드 & 스토리지", "icon": "cloud", "pages": [ "ko/tools/cloud-storage/overview", @@ -11052,7 +12450,7 @@ ] }, { - "group": "\uc790\ub3d9\ud654", + "group": "자동화", "icon": "bolt", "pages": [ "ko/tools/automation/overview", @@ -11087,7 +12485,7 @@ ] }, { - "group": "\ud559\uc2b5", + "group": "학습", "pages": [ "ko/learn/overview", "ko/learn/llm-selection-guide", @@ -11124,17 +12522,17 @@ ] }, { - "tab": "\uc5d4\ud130\ud504\ub77c\uc774\uc988", + "tab": "엔터프라이즈", "icon": "briefcase", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/enterprise/introduction" ] }, { - "group": "\ube4c\ub4dc", + "group": "빌드", "pages": [ "ko/enterprise/features/automations", "ko/enterprise/features/crew-studio", @@ -11145,7 +12543,7 @@ ] }, { - "group": "\uc6b4\uc601", + "group": "운영", "pages": [ "ko/enterprise/features/traces", "ko/enterprise/features/webhook-streaming", @@ -11154,13 +12552,13 @@ ] }, { - "group": "\uad00\ub9ac", + "group": "관리", "pages": [ "ko/enterprise/features/rbac" ] }, { - "group": "\ud1b5\ud569 \ubb38\uc11c", + "group": "통합 문서", "pages": [ "ko/enterprise/integrations/asana", "ko/enterprise/integrations/box", @@ -11212,7 +12610,7 @@ ] }, { - "group": "\ud2b8\ub9ac\uac70", + "group": "트리거", "pages": [ "ko/enterprise/guides/automation-triggers", "ko/enterprise/guides/gmail-trigger", @@ -11228,7 +12626,7 @@ ] }, { - "group": "\ud559\uc2b5 \uc790\uc6d0", + "group": "학습 자원", "pages": [ "ko/enterprise/resources/frequently-asked-questions" ] @@ -11236,11 +12634,11 @@ ] }, { - "tab": "API \ub808\ud37c\ub7f0\uc2a4", + "tab": "API 레퍼런스", "icon": "magnifying-glass", "groups": [ { - "group": "\uc2dc\uc791 \uc548\ub0b4", + "group": "시작 안내", "pages": [ "ko/api-reference/introduction", "ko/api-reference/inputs", @@ -11252,11 +12650,11 @@ ] }, { - "tab": "\uc608\uc2dc", + "tab": "예시", "icon": "code", "groups": [ { - "group": "\uc608\uc2dc", + "group": "예시", "pages": [ "ko/examples/example", "ko/examples/cookbooks" @@ -11265,11 +12663,11 
@@ ] }, { - "tab": "\ubcc0\uacbd \ub85c\uadf8", + "tab": "변경 로그", "icon": "clock", "groups": [ { - "group": "\ub9b4\ub9ac\uc2a4 \ub178\ud2b8", + "group": "릴리스 노트", "pages": [ "ko/changelog" ] @@ -11285,17 +12683,17 @@ "global": { "anchors": [ { - "anchor": "\u0627\u0644\u0645\u0648\u0642\u0639", + "anchor": "الموقع", "href": "https://crewai.com", "icon": "globe" }, { - "anchor": "\u0627\u0644\u0645\u0646\u062a\u062f\u0649", + "anchor": "المنتدى", "href": "https://community.crewai.com", "icon": "discourse" }, { - "anchor": "\u0627\u0644\u0645\u062f\u0648\u0651\u0646\u0629", + "anchor": "المدوّنة", "href": "https://blog.crewai.com", "icon": "newspaper" }, @@ -11308,15 +12706,15 @@ }, "versions": [ { - "version": "v1.13.0", + "version": "v1.14.0", "default": true, "tabs": [ { - "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", + "tab": "الرئيسية", "icon": "house", "groups": [ { - "group": "\u0645\u0631\u062d\u0628\u0627\u064b", + "group": "مرحباً", "pages": [ "ar/index" ] @@ -11324,11 +12722,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", + "tab": "التقنية التوثيق", "icon": "book-open", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/introduction", "ar/installation", @@ -11336,31 +12734,31 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", + "group": "الأدلّة", "pages": [ { - "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", + "group": "الاستراتيجية", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", + "group": "الوكلاء", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", + "group": "الطواقم", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", + "group": 
"التدفقات", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -11368,21 +12766,21 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", + "group": "أدوات البرمجة", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "\u0645\u062a\u0642\u062f\u0651\u0645", + "group": "متقدّم", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -11390,7 +12788,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", + "group": "الترحيل", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -11399,7 +12797,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", + "group": "المفاهيم الأساسية", "pages": [ "ar/concepts/agents", "ar/concepts/agent-capabilities", @@ -11425,7 +12823,7 @@ ] }, { - "group": "\u062a\u0643\u0627\u0645\u0644 MCP", + "group": "تكامل MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -11437,11 +12835,11 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "pages": [ "ar/tools/overview", { - "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", + "group": "الملفات والمستندات", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -11461,7 +12859,7 @@ ] }, { - "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", + "group": "استخراج بيانات الويب", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -11481,7 +12879,7 @@ ] }, { - "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", + "group": "البحث والاستكشاف", "icon": "magnifying-glass", 
"pages": [ "ar/tools/search-research/overview", @@ -11503,7 +12901,7 @@ ] }, { - "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", + "group": "قواعد البيانات", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -11518,7 +12916,7 @@ ] }, { - "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", + "group": "الذكاء الاصطناعي والتعلّم الآلي", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -11532,7 +12930,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", + "group": "التخزين السحابي", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -11551,7 +12949,7 @@ ] }, { - "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", + "group": "الأتمتة", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -11586,7 +12984,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "التعلّم", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -11623,17 +13021,17 @@ ] }, { - "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", + "tab": "المؤسسات", "icon": "briefcase", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "\u0627\u0644\u0628\u0646\u0627\u0621", + "group": "البناء", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -11644,7 +13042,7 @@ ] }, { - "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", + "group": "العمليات", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -11653,13 +13051,13 @@ ] }, { - "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", + "group": "الإدارة", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": 
"\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", + "group": "التكاملات", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -11711,7 +13109,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", + "group": "المشغّلات", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -11727,7 +13125,7 @@ ] }, { - "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "موارد التعلّم", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -11735,11 +13133,11 @@ ] }, { - "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", + "tab": "API المرجع", "icon": "magnifying-glass", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -11751,11 +13149,11 @@ ] }, { - "tab": "\u0623\u0645\u062b\u0644\u0629", + "tab": "أمثلة", "icon": "code", "groups": [ { - "group": "\u0623\u0645\u062b\u0644\u0629", + "group": "أمثلة", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -11764,11 +13162,480 @@ ] }, { - "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", + "tab": "التغييرات السجلات", "icon": "clock", "groups": [ { - "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", + "group": "سجل التغييرات", + "pages": [ + "ar/changelog" + ] + } + ] + } + ] + }, + { + "version": "v1.13.0", + "tabs": [ + { + "tab": "الرئيسية", + "icon": "house", + "groups": [ + { + "group": "مرحباً", + "pages": [ + "ar/index" + ] + } + ] + }, + { + "tab": "التقنية التوثيق", + "icon": "book-open", + "groups": [ + { + "group": "البدء", + "pages": [ + "ar/introduction", + "ar/installation", + "ar/quickstart" + ] + }, + { + "group": "الأدلّة", + "pages": [ + { + "group": "الاستراتيجية", + "icon": "compass", + "pages": [ + "ar/guides/concepts/evaluating-use-cases" + ] + 
}, + { + "group": "الوكلاء", + "icon": "user", + "pages": [ + "ar/guides/agents/crafting-effective-agents" + ] + }, + { + "group": "الطواقم", + "icon": "users", + "pages": [ + "ar/guides/crews/first-crew" + ] + }, + { + "group": "التدفقات", + "icon": "code-branch", + "pages": [ + "ar/guides/flows/first-flow", + "ar/guides/flows/mastering-flow-state" + ] + }, + { + "group": "الأدوات", + "icon": "wrench", + "pages": [ + "ar/guides/tools/publish-custom-tools" + ] + }, + { + "group": "أدوات البرمجة", + "icon": "terminal", + "pages": [ + "ar/guides/coding-tools/agents-md" + ] + }, + { + "group": "متقدّم", + "icon": "gear", + "pages": [ + "ar/guides/advanced/customizing-prompts", + "ar/guides/advanced/fingerprinting" + ] + }, + { + "group": "الترحيل", + "icon": "shuffle", + "pages": [ + "ar/guides/migration/migrating-from-langgraph" + ] + } + ] + }, + { + "group": "المفاهيم الأساسية", + "pages": [ + "ar/concepts/agents", + "ar/concepts/agent-capabilities", + "ar/concepts/tasks", + "ar/concepts/crews", + "ar/concepts/flows", + "ar/concepts/production-architecture", + "ar/concepts/knowledge", + "ar/concepts/skills", + "ar/concepts/llms", + "ar/concepts/files", + "ar/concepts/processes", + "ar/concepts/collaboration", + "ar/concepts/training", + "ar/concepts/memory", + "ar/concepts/reasoning", + "ar/concepts/planning", + "ar/concepts/testing", + "ar/concepts/cli", + "ar/concepts/tools", + "ar/concepts/event-listener", + "ar/concepts/checkpointing" + ] + }, + { + "group": "تكامل MCP", + "pages": [ + "ar/mcp/overview", + "ar/mcp/dsl-integration", + "ar/mcp/stdio", + "ar/mcp/sse", + "ar/mcp/streamable-http", + "ar/mcp/multiple-servers", + "ar/mcp/security" + ] + }, + { + "group": "الأدوات", + "pages": [ + "ar/tools/overview", + { + "group": "الملفات والمستندات", + "icon": "folder-open", + "pages": [ + "ar/tools/file-document/overview", + "ar/tools/file-document/filereadtool", + "ar/tools/file-document/filewritetool", + "ar/tools/file-document/pdfsearchtool", + 
"ar/tools/file-document/docxsearchtool", + "ar/tools/file-document/mdxsearchtool", + "ar/tools/file-document/xmlsearchtool", + "ar/tools/file-document/txtsearchtool", + "ar/tools/file-document/jsonsearchtool", + "ar/tools/file-document/csvsearchtool", + "ar/tools/file-document/directorysearchtool", + "ar/tools/file-document/directoryreadtool", + "ar/tools/file-document/ocrtool", + "ar/tools/file-document/pdf-text-writing-tool" + ] + }, + { + "group": "استخراج بيانات الويب", + "icon": "globe", + "pages": [ + "ar/tools/web-scraping/overview", + "ar/tools/web-scraping/scrapewebsitetool", + "ar/tools/web-scraping/scrapeelementfromwebsitetool", + "ar/tools/web-scraping/scrapflyscrapetool", + "ar/tools/web-scraping/seleniumscrapingtool", + "ar/tools/web-scraping/scrapegraphscrapetool", + "ar/tools/web-scraping/spidertool", + "ar/tools/web-scraping/browserbaseloadtool", + "ar/tools/web-scraping/hyperbrowserloadtool", + "ar/tools/web-scraping/stagehandtool", + "ar/tools/web-scraping/firecrawlcrawlwebsitetool", + "ar/tools/web-scraping/firecrawlscrapewebsitetool", + "ar/tools/web-scraping/oxylabsscraperstool", + "ar/tools/web-scraping/brightdata-tools" + ] + }, + { + "group": "البحث والاستكشاف", + "icon": "magnifying-glass", + "pages": [ + "ar/tools/search-research/overview", + "ar/tools/search-research/serperdevtool", + "ar/tools/search-research/bravesearchtool", + "ar/tools/search-research/exasearchtool", + "ar/tools/search-research/linkupsearchtool", + "ar/tools/search-research/githubsearchtool", + "ar/tools/search-research/websitesearchtool", + "ar/tools/search-research/codedocssearchtool", + "ar/tools/search-research/youtubechannelsearchtool", + "ar/tools/search-research/youtubevideosearchtool", + "ar/tools/search-research/tavilysearchtool", + "ar/tools/search-research/tavilyextractortool", + "ar/tools/search-research/arxivpapertool", + "ar/tools/search-research/serpapi-googlesearchtool", + "ar/tools/search-research/serpapi-googleshoppingtool", + 
"ar/tools/search-research/databricks-query-tool" + ] + }, + { + "group": "قواعد البيانات", + "icon": "database", + "pages": [ + "ar/tools/database-data/overview", + "ar/tools/database-data/mysqltool", + "ar/tools/database-data/pgsearchtool", + "ar/tools/database-data/snowflakesearchtool", + "ar/tools/database-data/nl2sqltool", + "ar/tools/database-data/qdrantvectorsearchtool", + "ar/tools/database-data/weaviatevectorsearchtool", + "ar/tools/database-data/mongodbvectorsearchtool", + "ar/tools/database-data/singlestoresearchtool" + ] + }, + { + "group": "الذكاء الاصطناعي والتعلّم الآلي", + "icon": "brain", + "pages": [ + "ar/tools/ai-ml/overview", + "ar/tools/ai-ml/dalletool", + "ar/tools/ai-ml/visiontool", + "ar/tools/ai-ml/aimindtool", + "ar/tools/ai-ml/llamaindextool", + "ar/tools/ai-ml/langchaintool", + "ar/tools/ai-ml/ragtool", + "ar/tools/ai-ml/codeinterpretertool" + ] + }, + { + "group": "التخزين السحابي", + "icon": "cloud", + "pages": [ + "ar/tools/cloud-storage/overview", + "ar/tools/cloud-storage/s3readertool", + "ar/tools/cloud-storage/s3writertool", + "ar/tools/cloud-storage/bedrockkbretriever" + ] + }, + { + "group": "Integrations", + "icon": "plug", + "pages": [ + "ar/tools/integration/overview", + "ar/tools/integration/bedrockinvokeagenttool", + "ar/tools/integration/crewaiautomationtool" + ] + }, + { + "group": "الأتمتة", + "icon": "bolt", + "pages": [ + "ar/tools/automation/overview", + "ar/tools/automation/apifyactorstool", + "ar/tools/automation/composiotool", + "ar/tools/automation/multiontool", + "ar/tools/automation/zapieractionstool" + ] + } + ] + }, + { + "group": "Observability", + "pages": [ + "ar/observability/tracing", + "ar/observability/overview", + "ar/observability/arize-phoenix", + "ar/observability/braintrust", + "ar/observability/datadog", + "ar/observability/galileo", + "ar/observability/langdb", + "ar/observability/langfuse", + "ar/observability/langtrace", + "ar/observability/maxim", + "ar/observability/mlflow", + 
"ar/observability/neatlogs", + "ar/observability/openlit", + "ar/observability/opik", + "ar/observability/patronus-evaluation", + "ar/observability/portkey", + "ar/observability/weave" + ] + }, + { + "group": "التعلّم", + "pages": [ + "ar/learn/overview", + "ar/learn/llm-selection-guide", + "ar/learn/conditional-tasks", + "ar/learn/coding-agents", + "ar/learn/create-custom-tools", + "ar/learn/custom-llm", + "ar/learn/custom-manager-agent", + "ar/learn/customizing-agents", + "ar/learn/dalle-image-generation", + "ar/learn/force-tool-output-as-result", + "ar/learn/hierarchical-process", + "ar/learn/human-input-on-execution", + "ar/learn/human-in-the-loop", + "ar/learn/human-feedback-in-flows", + "ar/learn/kickoff-async", + "ar/learn/kickoff-for-each", + "ar/learn/llm-connections", + "ar/learn/multimodal-agents", + "ar/learn/replay-tasks-from-latest-crew-kickoff", + "ar/learn/sequential-process", + "ar/learn/using-annotations", + "ar/learn/execution-hooks", + "ar/learn/llm-hooks", + "ar/learn/tool-hooks" + ] + }, + { + "group": "Telemetry", + "pages": [ + "ar/telemetry" + ] + } + ] + }, + { + "tab": "المؤسسات", + "icon": "briefcase", + "groups": [ + { + "group": "البدء", + "pages": [ + "ar/enterprise/introduction" + ] + }, + { + "group": "البناء", + "pages": [ + "ar/enterprise/features/automations", + "ar/enterprise/features/crew-studio", + "ar/enterprise/features/marketplace", + "ar/enterprise/features/agent-repositories", + "ar/enterprise/features/tools-and-integrations", + "ar/enterprise/features/pii-trace-redactions" + ] + }, + { + "group": "العمليات", + "pages": [ + "ar/enterprise/features/traces", + "ar/enterprise/features/webhook-streaming", + "ar/enterprise/features/hallucination-guardrail", + "ar/enterprise/features/flow-hitl-management" + ] + }, + { + "group": "الإدارة", + "pages": [ + "ar/enterprise/features/rbac" + ] + }, + { + "group": "التكاملات", + "pages": [ + "ar/enterprise/integrations/asana", + "ar/enterprise/integrations/box", + 
"ar/enterprise/integrations/clickup", + "ar/enterprise/integrations/github", + "ar/enterprise/integrations/gmail", + "ar/enterprise/integrations/google_calendar", + "ar/enterprise/integrations/google_contacts", + "ar/enterprise/integrations/google_docs", + "ar/enterprise/integrations/google_drive", + "ar/enterprise/integrations/google_sheets", + "ar/enterprise/integrations/google_slides", + "ar/enterprise/integrations/hubspot", + "ar/enterprise/integrations/jira", + "ar/enterprise/integrations/linear", + "ar/enterprise/integrations/microsoft_excel", + "ar/enterprise/integrations/microsoft_onedrive", + "ar/enterprise/integrations/microsoft_outlook", + "ar/enterprise/integrations/microsoft_sharepoint", + "ar/enterprise/integrations/microsoft_teams", + "ar/enterprise/integrations/microsoft_word", + "ar/enterprise/integrations/notion", + "ar/enterprise/integrations/salesforce", + "ar/enterprise/integrations/shopify", + "ar/enterprise/integrations/slack", + "ar/enterprise/integrations/stripe", + "ar/enterprise/integrations/zendesk" + ] + }, + { + "group": "How-To Guides", + "pages": [ + "ar/enterprise/guides/build-crew", + "ar/enterprise/guides/prepare-for-deployment", + "ar/enterprise/guides/deploy-to-amp", + "ar/enterprise/guides/private-package-registry", + "ar/enterprise/guides/kickoff-crew", + "ar/enterprise/guides/training-crews", + "ar/enterprise/guides/update-crew", + "ar/enterprise/guides/enable-crew-studio", + "ar/enterprise/guides/capture_telemetry_logs", + "ar/enterprise/guides/azure-openai-setup", + "ar/enterprise/guides/tool-repository", + "ar/enterprise/guides/custom-mcp-server", + "ar/enterprise/guides/react-component-export", + "ar/enterprise/guides/team-management", + "ar/enterprise/guides/human-in-the-loop", + "ar/enterprise/guides/webhook-automation" + ] + }, + { + "group": "المشغّلات", + "pages": [ + "ar/enterprise/guides/automation-triggers", + "ar/enterprise/guides/gmail-trigger", + "ar/enterprise/guides/google-calendar-trigger", + 
"ar/enterprise/guides/google-drive-trigger", + "ar/enterprise/guides/outlook-trigger", + "ar/enterprise/guides/onedrive-trigger", + "ar/enterprise/guides/microsoft-teams-trigger", + "ar/enterprise/guides/slack-trigger", + "ar/enterprise/guides/hubspot-trigger", + "ar/enterprise/guides/salesforce-trigger", + "ar/enterprise/guides/zapier-trigger" + ] + }, + { + "group": "موارد التعلّم", + "pages": [ + "ar/enterprise/resources/frequently-asked-questions" + ] + } + ] + }, + { + "tab": "API المرجع", + "icon": "magnifying-glass", + "groups": [ + { + "group": "البدء", + "pages": [ + "ar/api-reference/introduction", + "ar/api-reference/inputs", + "ar/api-reference/kickoff", + "ar/api-reference/resume", + "ar/api-reference/status" + ] + } + ] + }, + { + "tab": "أمثلة", + "icon": "code", + "groups": [ + { + "group": "أمثلة", + "pages": [ + "ar/examples/example", + "ar/examples/cookbooks" + ] + } + ] + }, + { + "tab": "التغييرات السجلات", + "icon": "clock", + "groups": [ + { + "group": "سجل التغييرات", "pages": [ "ar/changelog" ] @@ -11781,11 +13648,11 @@ "version": "v1.12.2", "tabs": [ { - "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", + "tab": "الرئيسية", "icon": "house", "groups": [ { - "group": "\u0645\u0631\u062d\u0628\u0627\u064b", + "group": "مرحباً", "pages": [ "ar/index" ] @@ -11793,11 +13660,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", + "tab": "التقنية التوثيق", "icon": "book-open", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/introduction", "ar/installation", @@ -11805,31 +13672,31 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", + "group": "الأدلّة", "pages": [ { - "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", + "group": "الاستراتيجية", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", + "group": 
"الوكلاء", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", + "group": "الطواقم", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", + "group": "التدفقات", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -11837,21 +13704,21 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", + "group": "أدوات البرمجة", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "\u0645\u062a\u0642\u062f\u0651\u0645", + "group": "متقدّم", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -11859,7 +13726,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", + "group": "الترحيل", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -11868,7 +13735,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", + "group": "المفاهيم الأساسية", "pages": [ "ar/concepts/agents", "ar/concepts/agent-capabilities", @@ -11894,7 +13761,7 @@ ] }, { - "group": "\u062a\u0643\u0627\u0645\u0644 MCP", + "group": "تكامل MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -11906,11 +13773,11 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "pages": [ "ar/tools/overview", { - "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", + "group": "الملفات والمستندات", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -11930,7 +13797,7 @@ ] }, { - "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a 
\u0627\u0644\u0648\u064a\u0628", + "group": "استخراج بيانات الويب", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -11950,7 +13817,7 @@ ] }, { - "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", + "group": "البحث والاستكشاف", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -11972,7 +13839,7 @@ ] }, { - "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", + "group": "قواعد البيانات", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -11987,7 +13854,7 @@ ] }, { - "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", + "group": "الذكاء الاصطناعي والتعلّم الآلي", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -12001,7 +13868,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", + "group": "التخزين السحابي", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -12020,7 +13887,7 @@ ] }, { - "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", + "group": "الأتمتة", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -12055,7 +13922,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "التعلّم", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -12092,17 +13959,17 @@ ] }, { - "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", + "tab": "المؤسسات", "icon": "briefcase", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "\u0627\u0644\u0628\u0646\u0627\u0621", + "group": "البناء", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -12113,7 +13980,7 @@ ] }, { - "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", + 
"group": "العمليات", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -12122,13 +13989,13 @@ ] }, { - "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", + "group": "الإدارة", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", + "group": "التكاملات", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -12180,7 +14047,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", + "group": "المشغّلات", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -12196,7 +14063,7 @@ ] }, { - "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "موارد التعلّم", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -12204,11 +14071,11 @@ ] }, { - "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", + "tab": "API المرجع", "icon": "magnifying-glass", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -12220,11 +14087,11 @@ ] }, { - "tab": "\u0623\u0645\u062b\u0644\u0629", + "tab": "أمثلة", "icon": "code", "groups": [ { - "group": "\u0623\u0645\u062b\u0644\u0629", + "group": "أمثلة", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -12233,11 +14100,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", + "tab": "التغييرات السجلات", "icon": "clock", "groups": [ { - "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", + "group": "سجل التغييرات", "pages": [ "ar/changelog" ] @@ -12250,11 +14117,11 @@ "version": "v1.12.1", "tabs": [ { - "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", + "tab": "الرئيسية", "icon": "house", "groups": [ { - "group": "\u0645\u0631\u062d\u0628\u0627\u064b", + "group": "مرحباً", 
"pages": [ "ar/index" ] @@ -12262,11 +14129,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", + "tab": "التقنية التوثيق", "icon": "book-open", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/introduction", "ar/installation", @@ -12274,31 +14141,31 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", + "group": "الأدلّة", "pages": [ { - "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", + "group": "الاستراتيجية", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", + "group": "الوكلاء", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", + "group": "الطواقم", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", + "group": "التدفقات", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -12306,21 +14173,21 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", + "group": "أدوات البرمجة", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "\u0645\u062a\u0642\u062f\u0651\u0645", + "group": "متقدّم", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -12328,7 +14195,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", + "group": "الترحيل", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -12337,7 +14204,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", + "group": "المفاهيم الأساسية", "pages": [ 
"ar/concepts/agents", "ar/concepts/tasks", @@ -12362,7 +14229,7 @@ ] }, { - "group": "\u062a\u0643\u0627\u0645\u0644 MCP", + "group": "تكامل MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -12374,11 +14241,11 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "pages": [ "ar/tools/overview", { - "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", + "group": "الملفات والمستندات", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -12398,7 +14265,7 @@ ] }, { - "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", + "group": "استخراج بيانات الويب", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -12418,7 +14285,7 @@ ] }, { - "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", + "group": "البحث والاستكشاف", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -12440,7 +14307,7 @@ ] }, { - "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", + "group": "قواعد البيانات", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -12455,7 +14322,7 @@ ] }, { - "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", + "group": "الذكاء الاصطناعي والتعلّم الآلي", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -12469,7 +14336,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", + "group": "التخزين السحابي", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -12488,7 +14355,7 @@ ] }, { - "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", + "group": "الأتمتة", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ 
-12523,7 +14390,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "التعلّم", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -12560,17 +14427,17 @@ ] }, { - "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", + "tab": "المؤسسات", "icon": "briefcase", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "\u0627\u0644\u0628\u0646\u0627\u0621", + "group": "البناء", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -12581,7 +14448,7 @@ ] }, { - "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", + "group": "العمليات", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -12590,13 +14457,13 @@ ] }, { - "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", + "group": "الإدارة", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", + "group": "التكاملات", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -12648,7 +14515,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", + "group": "المشغّلات", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -12664,7 +14531,7 @@ ] }, { - "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "موارد التعلّم", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -12672,11 +14539,11 @@ ] }, { - "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", + "tab": "API المرجع", "icon": "magnifying-glass", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -12688,11 +14555,11 @@ ] }, { - "tab": "\u0623\u0645\u062b\u0644\u0629", + "tab": "أمثلة", "icon": "code", "groups": [ { - "group": 
"\u0623\u0645\u062b\u0644\u0629", + "group": "أمثلة", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -12701,11 +14568,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", + "tab": "التغييرات السجلات", "icon": "clock", "groups": [ { - "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", + "group": "سجل التغييرات", "pages": [ "ar/changelog" ] @@ -12718,11 +14585,11 @@ "version": "v1.12.0", "tabs": [ { - "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", + "tab": "الرئيسية", "icon": "house", "groups": [ { - "group": "\u0645\u0631\u062d\u0628\u0627\u064b", + "group": "مرحباً", "pages": [ "ar/index" ] @@ -12730,11 +14597,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", + "tab": "التقنية التوثيق", "icon": "book-open", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/introduction", "ar/installation", @@ -12742,31 +14609,31 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", + "group": "الأدلّة", "pages": [ { - "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", + "group": "الاستراتيجية", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", + "group": "الوكلاء", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", + "group": "الطواقم", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", + "group": "التدفقات", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -12774,21 +14641,21 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - 
"group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", + "group": "أدوات البرمجة", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "\u0645\u062a\u0642\u062f\u0651\u0645", + "group": "متقدّم", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -12796,7 +14663,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", + "group": "الترحيل", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -12805,7 +14672,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", + "group": "المفاهيم الأساسية", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -12830,7 +14697,7 @@ ] }, { - "group": "\u062a\u0643\u0627\u0645\u0644 MCP", + "group": "تكامل MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -12842,11 +14709,11 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "pages": [ "ar/tools/overview", { - "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", + "group": "الملفات والمستندات", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -12866,7 +14733,7 @@ ] }, { - "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", + "group": "استخراج بيانات الويب", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -12886,7 +14753,7 @@ ] }, { - "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", + "group": "البحث والاستكشاف", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -12908,7 +14775,7 @@ ] }, { - "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", + "group": "قواعد البيانات", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -12923,7 
+14790,7 @@ ] }, { - "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", + "group": "الذكاء الاصطناعي والتعلّم الآلي", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -12937,7 +14804,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", + "group": "التخزين السحابي", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -12956,7 +14823,7 @@ ] }, { - "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", + "group": "الأتمتة", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -12991,7 +14858,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "التعلّم", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -13028,17 +14895,17 @@ ] }, { - "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", + "tab": "المؤسسات", "icon": "briefcase", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "\u0627\u0644\u0628\u0646\u0627\u0621", + "group": "البناء", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -13049,7 +14916,7 @@ ] }, { - "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", + "group": "العمليات", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -13058,13 +14925,13 @@ ] }, { - "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", + "group": "الإدارة", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", + "group": "التكاملات", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -13116,7 +14983,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", + "group": "المشغّلات", "pages": [ 
"ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -13132,7 +14999,7 @@ ] }, { - "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "موارد التعلّم", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -13140,11 +15007,11 @@ ] }, { - "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", + "tab": "API المرجع", "icon": "magnifying-glass", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -13156,11 +15023,11 @@ ] }, { - "tab": "\u0623\u0645\u062b\u0644\u0629", + "tab": "أمثلة", "icon": "code", "groups": [ { - "group": "\u0623\u0645\u062b\u0644\u0629", + "group": "أمثلة", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -13169,11 +15036,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", + "tab": "التغييرات السجلات", "icon": "clock", "groups": [ { - "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", + "group": "سجل التغييرات", "pages": [ "ar/changelog" ] @@ -13186,11 +15053,11 @@ "version": "v1.11.1", "tabs": [ { - "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", + "tab": "الرئيسية", "icon": "house", "groups": [ { - "group": "\u0645\u0631\u062d\u0628\u0627\u064b", + "group": "مرحباً", "pages": [ "ar/index" ] @@ -13198,11 +15065,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", + "tab": "التقنية التوثيق", "icon": "book-open", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/introduction", "ar/installation", @@ -13210,31 +15077,31 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", + "group": "الأدلّة", "pages": [ { - "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", + "group": "الاستراتيجية", 
"icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", + "group": "الوكلاء", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", + "group": "الطواقم", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", + "group": "التدفقات", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -13242,21 +15109,21 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", + "group": "أدوات البرمجة", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "\u0645\u062a\u0642\u062f\u0651\u0645", + "group": "متقدّم", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -13264,7 +15131,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", + "group": "الترحيل", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -13273,7 +15140,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", + "group": "المفاهيم الأساسية", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -13298,7 +15165,7 @@ ] }, { - "group": "\u062a\u0643\u0627\u0645\u0644 MCP", + "group": "تكامل MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -13310,11 +15177,11 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "pages": [ "ar/tools/overview", { - "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", + "group": "الملفات والمستندات", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ 
-13334,7 +15201,7 @@ ] }, { - "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", + "group": "استخراج بيانات الويب", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -13354,7 +15221,7 @@ ] }, { - "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", + "group": "البحث والاستكشاف", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -13376,7 +15243,7 @@ ] }, { - "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", + "group": "قواعد البيانات", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -13391,7 +15258,7 @@ ] }, { - "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", + "group": "الذكاء الاصطناعي والتعلّم الآلي", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -13405,7 +15272,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", + "group": "التخزين السحابي", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -13424,7 +15291,7 @@ ] }, { - "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", + "group": "الأتمتة", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -13459,7 +15326,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "التعلّم", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -13496,17 +15363,17 @@ ] }, { - "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", + "tab": "المؤسسات", "icon": "briefcase", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "\u0627\u0644\u0628\u0646\u0627\u0621", + "group": "البناء", "pages": [ "ar/enterprise/features/automations", 
"ar/enterprise/features/crew-studio", @@ -13517,7 +15384,7 @@ ] }, { - "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", + "group": "العمليات", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -13526,13 +15393,13 @@ ] }, { - "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", + "group": "الإدارة", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", + "group": "التكاملات", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -13584,7 +15451,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", + "group": "المشغّلات", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -13600,7 +15467,7 @@ ] }, { - "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "موارد التعلّم", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -13608,11 +15475,11 @@ ] }, { - "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", + "tab": "API المرجع", "icon": "magnifying-glass", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -13624,11 +15491,11 @@ ] }, { - "tab": "\u0623\u0645\u062b\u0644\u0629", + "tab": "أمثلة", "icon": "code", "groups": [ { - "group": "\u0623\u0645\u062b\u0644\u0629", + "group": "أمثلة", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -13637,11 +15504,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", + "tab": "التغييرات السجلات", "icon": "clock", "groups": [ { - "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", + "group": "سجل التغييرات", "pages": [ "ar/changelog" ] @@ -13654,11 +15521,11 @@ "version": "v1.11.0", "tabs": [ { - "tab": 
"\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", + "tab": "الرئيسية", "icon": "house", "groups": [ { - "group": "\u0645\u0631\u062d\u0628\u0627\u064b", + "group": "مرحباً", "pages": [ "ar/index" ] @@ -13666,11 +15533,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", + "tab": "التقنية التوثيق", "icon": "book-open", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/introduction", "ar/installation", @@ -13678,31 +15545,31 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", + "group": "الأدلّة", "pages": [ { - "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", + "group": "الاستراتيجية", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", + "group": "الوكلاء", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", + "group": "الطواقم", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", + "group": "التدفقات", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -13710,21 +15577,21 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", + "group": "أدوات البرمجة", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "\u0645\u062a\u0642\u062f\u0651\u0645", + "group": "متقدّم", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -13732,7 +15599,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", + "group": "الترحيل", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -13741,7 
+15608,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", + "group": "المفاهيم الأساسية", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -13765,7 +15632,7 @@ ] }, { - "group": "\u062a\u0643\u0627\u0645\u0644 MCP", + "group": "تكامل MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -13777,11 +15644,11 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "pages": [ "ar/tools/overview", { - "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", + "group": "الملفات والمستندات", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -13801,7 +15668,7 @@ ] }, { - "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", + "group": "استخراج بيانات الويب", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -13821,7 +15688,7 @@ ] }, { - "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", + "group": "البحث والاستكشاف", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -13843,7 +15710,7 @@ ] }, { - "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", + "group": "قواعد البيانات", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -13858,7 +15725,7 @@ ] }, { - "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", + "group": "الذكاء الاصطناعي والتعلّم الآلي", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -13872,7 +15739,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", + "group": "التخزين السحابي", "icon": "cloud", "pages": [ 
"ar/tools/cloud-storage/overview", @@ -13891,7 +15758,7 @@ ] }, { - "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", + "group": "الأتمتة", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -13926,7 +15793,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "التعلّم", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -13963,17 +15830,17 @@ ] }, { - "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", + "tab": "المؤسسات", "icon": "briefcase", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "\u0627\u0644\u0628\u0646\u0627\u0621", + "group": "البناء", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -13984,7 +15851,7 @@ ] }, { - "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", + "group": "العمليات", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -13993,13 +15860,13 @@ ] }, { - "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", + "group": "الإدارة", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", + "group": "التكاملات", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -14051,7 +15918,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", + "group": "المشغّلات", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -14067,7 +15934,7 @@ ] }, { - "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "موارد التعلّم", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -14075,11 +15942,11 @@ ] }, { - "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", + "tab": "API المرجع", "icon": "magnifying-glass", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ 
"ar/api-reference/introduction", "ar/api-reference/inputs", @@ -14091,11 +15958,11 @@ ] }, { - "tab": "\u0623\u0645\u062b\u0644\u0629", + "tab": "أمثلة", "icon": "code", "groups": [ { - "group": "\u0623\u0645\u062b\u0644\u0629", + "group": "أمثلة", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -14104,11 +15971,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", + "tab": "التغييرات السجلات", "icon": "clock", "groups": [ { - "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", + "group": "سجل التغييرات", "pages": [ "ar/changelog" ] @@ -14121,11 +15988,11 @@ "version": "v1.10.1", "tabs": [ { - "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", + "tab": "الرئيسية", "icon": "house", "groups": [ { - "group": "\u0645\u0631\u062d\u0628\u0627\u064b", + "group": "مرحباً", "pages": [ "ar/index" ] @@ -14133,11 +16000,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", + "tab": "التقنية التوثيق", "icon": "book-open", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/introduction", "ar/installation", @@ -14145,31 +16012,31 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", + "group": "الأدلّة", "pages": [ { - "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", + "group": "الاستراتيجية", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", + "group": "الوكلاء", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", + "group": "الطواقم", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", + "group": "التدفقات", "icon": "code-branch", "pages": [ 
"ar/guides/flows/first-flow", @@ -14177,21 +16044,21 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", + "group": "أدوات البرمجة", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "\u0645\u062a\u0642\u062f\u0651\u0645", + "group": "متقدّم", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -14199,7 +16066,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", + "group": "الترحيل", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -14208,7 +16075,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", + "group": "المفاهيم الأساسية", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -14232,7 +16099,7 @@ ] }, { - "group": "\u062a\u0643\u0627\u0645\u0644 MCP", + "group": "تكامل MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -14244,11 +16111,11 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "pages": [ "ar/tools/overview", { - "group": "\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", + "group": "الملفات والمستندات", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -14268,7 +16135,7 @@ ] }, { - "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", + "group": "استخراج بيانات الويب", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -14288,7 +16155,7 @@ ] }, { - "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", + "group": "البحث والاستكشاف", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -14310,7 
+16177,7 @@ ] }, { - "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", + "group": "قواعد البيانات", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -14325,7 +16192,7 @@ ] }, { - "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", + "group": "الذكاء الاصطناعي والتعلّم الآلي", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -14339,7 +16206,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", + "group": "التخزين السحابي", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -14358,7 +16225,7 @@ ] }, { - "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", + "group": "الأتمتة", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -14393,7 +16260,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "التعلّم", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -14430,17 +16297,17 @@ ] }, { - "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", + "tab": "المؤسسات", "icon": "briefcase", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "\u0627\u0644\u0628\u0646\u0627\u0621", + "group": "البناء", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -14451,7 +16318,7 @@ ] }, { - "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", + "group": "العمليات", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -14460,13 +16327,13 @@ ] }, { - "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", + "group": "الإدارة", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", + "group": "التكاملات", "pages": [ 
"ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -14518,7 +16385,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", + "group": "المشغّلات", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -14534,7 +16401,7 @@ ] }, { - "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "موارد التعلّم", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -14542,11 +16409,11 @@ ] }, { - "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", + "tab": "API المرجع", "icon": "magnifying-glass", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -14558,11 +16425,11 @@ ] }, { - "tab": "\u0623\u0645\u062b\u0644\u0629", + "tab": "أمثلة", "icon": "code", "groups": [ { - "group": "\u0623\u0645\u062b\u0644\u0629", + "group": "أمثلة", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -14571,11 +16438,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", + "tab": "التغييرات السجلات", "icon": "clock", "groups": [ { - "group": "\u0633\u062c\u0644 \u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", + "group": "سجل التغييرات", "pages": [ "ar/changelog" ] @@ -14588,11 +16455,11 @@ "version": "v1.10.0", "tabs": [ { - "tab": "\u0627\u0644\u0631\u0626\u064a\u0633\u064a\u0629", + "tab": "الرئيسية", "icon": "house", "groups": [ { - "group": "\u0645\u0631\u062d\u0628\u0627\u064b", + "group": "مرحباً", "pages": [ "ar/index" ] @@ -14600,11 +16467,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u0642\u0646\u064a\u0629 \u0627\u0644\u062a\u0648\u062b\u064a\u0642", + "tab": "التقنية التوثيق", "icon": "book-open", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/introduction", "ar/installation", @@ -14612,31 +16479,31 @@ ] }, { - 
"group": "\u0627\u0644\u0623\u062f\u0644\u0651\u0629", + "group": "الأدلّة", "pages": [ { - "group": "\u0627\u0644\u0627\u0633\u062a\u0631\u0627\u062a\u064a\u062c\u064a\u0629", + "group": "الاستراتيجية", "icon": "compass", "pages": [ "ar/guides/concepts/evaluating-use-cases" ] }, { - "group": "\u0627\u0644\u0648\u0643\u0644\u0627\u0621", + "group": "الوكلاء", "icon": "user", "pages": [ "ar/guides/agents/crafting-effective-agents" ] }, { - "group": "\u0627\u0644\u0637\u0648\u0627\u0642\u0645", + "group": "الطواقم", "icon": "users", "pages": [ "ar/guides/crews/first-crew" ] }, { - "group": "\u0627\u0644\u062a\u062f\u0641\u0642\u0627\u062a", + "group": "التدفقات", "icon": "code-branch", "pages": [ "ar/guides/flows/first-flow", @@ -14644,21 +16511,21 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "icon": "wrench", "pages": [ "ar/guides/tools/publish-custom-tools" ] }, { - "group": "\u0623\u062f\u0648\u0627\u062a \u0627\u0644\u0628\u0631\u0645\u062c\u0629", + "group": "أدوات البرمجة", "icon": "terminal", "pages": [ "ar/guides/coding-tools/agents-md" ] }, { - "group": "\u0645\u062a\u0642\u062f\u0651\u0645", + "group": "متقدّم", "icon": "gear", "pages": [ "ar/guides/advanced/customizing-prompts", @@ -14666,7 +16533,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0631\u062d\u064a\u0644", + "group": "الترحيل", "icon": "shuffle", "pages": [ "ar/guides/migration/migrating-from-langgraph" @@ -14675,7 +16542,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0641\u0627\u0647\u064a\u0645 \u0627\u0644\u0623\u0633\u0627\u0633\u064a\u0629", + "group": "المفاهيم الأساسية", "pages": [ "ar/concepts/agents", "ar/concepts/tasks", @@ -14700,7 +16567,7 @@ ] }, { - "group": "\u062a\u0643\u0627\u0645\u0644 MCP", + "group": "تكامل MCP", "pages": [ "ar/mcp/overview", "ar/mcp/dsl-integration", @@ -14712,11 +16579,11 @@ ] }, { - "group": "\u0627\u0644\u0623\u062f\u0648\u0627\u062a", + "group": "الأدوات", "pages": [ "ar/tools/overview", { - "group": 
"\u0627\u0644\u0645\u0644\u0641\u0627\u062a \u0648\u0627\u0644\u0645\u0633\u062a\u0646\u062f\u0627\u062a", + "group": "الملفات والمستندات", "icon": "folder-open", "pages": [ "ar/tools/file-document/overview", @@ -14736,7 +16603,7 @@ ] }, { - "group": "\u0627\u0633\u062a\u062e\u0631\u0627\u062c \u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0648\u064a\u0628", + "group": "استخراج بيانات الويب", "icon": "globe", "pages": [ "ar/tools/web-scraping/overview", @@ -14756,7 +16623,7 @@ ] }, { - "group": "\u0627\u0644\u0628\u062d\u062b \u0648\u0627\u0644\u0627\u0633\u062a\u0643\u0634\u0627\u0641", + "group": "البحث والاستكشاف", "icon": "magnifying-glass", "pages": [ "ar/tools/search-research/overview", @@ -14778,7 +16645,7 @@ ] }, { - "group": "\u0642\u0648\u0627\u0639\u062f \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a", + "group": "قواعد البيانات", "icon": "database", "pages": [ "ar/tools/database-data/overview", @@ -14793,7 +16660,7 @@ ] }, { - "group": "\u0627\u0644\u0630\u0643\u0627\u0621 \u0627\u0644\u0627\u0635\u0637\u0646\u0627\u0639\u064a \u0648\u0627\u0644\u062a\u0639\u0644\u0651\u0645 \u0627\u0644\u0622\u0644\u064a", + "group": "الذكاء الاصطناعي والتعلّم الآلي", "icon": "brain", "pages": [ "ar/tools/ai-ml/overview", @@ -14807,7 +16674,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0633\u062d\u0627\u0628\u064a", + "group": "التخزين السحابي", "icon": "cloud", "pages": [ "ar/tools/cloud-storage/overview", @@ -14826,7 +16693,7 @@ ] }, { - "group": "\u0627\u0644\u0623\u062a\u0645\u062a\u0629", + "group": "الأتمتة", "icon": "bolt", "pages": [ "ar/tools/automation/overview", @@ -14861,7 +16728,7 @@ ] }, { - "group": "\u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "التعلّم", "pages": [ "ar/learn/overview", "ar/learn/llm-selection-guide", @@ -14898,17 +16765,17 @@ ] }, { - "tab": "\u0627\u0644\u0645\u0624\u0633\u0633\u0627\u062a", + "tab": "المؤسسات", "icon": "briefcase", "groups": [ { - "group": 
"\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/enterprise/introduction" ] }, { - "group": "\u0627\u0644\u0628\u0646\u0627\u0621", + "group": "البناء", "pages": [ "ar/enterprise/features/automations", "ar/enterprise/features/crew-studio", @@ -14919,7 +16786,7 @@ ] }, { - "group": "\u0627\u0644\u0639\u0645\u0644\u064a\u0627\u062a", + "group": "العمليات", "pages": [ "ar/enterprise/features/traces", "ar/enterprise/features/webhook-streaming", @@ -14928,13 +16795,13 @@ ] }, { - "group": "\u0627\u0644\u0625\u062f\u0627\u0631\u0629", + "group": "الإدارة", "pages": [ "ar/enterprise/features/rbac" ] }, { - "group": "\u0627\u0644\u062a\u0643\u0627\u0645\u0644\u0627\u062a", + "group": "التكاملات", "pages": [ "ar/enterprise/integrations/asana", "ar/enterprise/integrations/box", @@ -14986,7 +16853,7 @@ ] }, { - "group": "\u0627\u0644\u0645\u0634\u063a\u0651\u0644\u0627\u062a", + "group": "المشغّلات", "pages": [ "ar/enterprise/guides/automation-triggers", "ar/enterprise/guides/gmail-trigger", @@ -15002,7 +16869,7 @@ ] }, { - "group": "\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062a\u0639\u0644\u0651\u0645", + "group": "موارد التعلّم", "pages": [ "ar/enterprise/resources/frequently-asked-questions" ] @@ -15010,11 +16877,11 @@ ] }, { - "tab": "API \u0627\u0644\u0645\u0631\u062c\u0639", + "tab": "API المرجع", "icon": "magnifying-glass", "groups": [ { - "group": "\u0627\u0644\u0628\u062f\u0621", + "group": "البدء", "pages": [ "ar/api-reference/introduction", "ar/api-reference/inputs", @@ -15026,11 +16893,11 @@ ] }, { - "tab": "\u0623\u0645\u062b\u0644\u0629", + "tab": "أمثلة", "icon": "code", "groups": [ { - "group": "\u0623\u0645\u062b\u0644\u0629", + "group": "أمثلة", "pages": [ "ar/examples/example", "ar/examples/cookbooks" @@ -15039,11 +16906,11 @@ ] }, { - "tab": "\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a \u0627\u0644\u0633\u062c\u0644\u0627\u062a", + "tab": "التغييرات السجلات", "icon": "clock", "groups": [ { - "group": "\u0633\u062c\u0644 
\u0627\u0644\u062a\u063a\u064a\u064a\u0631\u0627\u062a", + "group": "سجل التغييرات", "pages": [ "ar/changelog" ] diff --git a/docs/en/changelog.mdx b/docs/en/changelog.mdx index b2ab728a7..891d9fc8b 100644 --- a/docs/en/changelog.mdx +++ b/docs/en/changelog.mdx @@ -4,6 +4,44 @@ description: "Product updates, improvements, and bug fixes for CrewAI" icon: "clock" mode: "wide" --- + + ## v1.14.0 + + [View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0) + + ## What's Changed + + ### Features + - Add checkpoint list/info CLI commands + - Add guardrail_type and name to distinguish traces + - Add SqliteProvider for checkpoint storage + - Add CheckpointConfig for automatic checkpointing + - Implement runtime state checkpointing, event system, and executor refactor + + ### Bug Fixes + - Add SSRF and path traversal protections + - Add path and URL validation to RAG tools + - Exclude embedding vectors from memory serialization to save tokens + - Ensure output directory exists before writing in flow template + - Bump litellm to >=1.83.0 to address CVE-2026-35030 + - Remove SEO indexing field causing Arabic page rendering + + ### Documentation + - Update changelog and version for v1.14.0 + - Update quickstart and installation guides for improved clarity + - Add storage providers section, export JsonProvider + - Add AMP Training Tab guide + + ### Refactoring + - Clean up checkpoint API + - Remove CodeInterpreterTool and deprecate code execution parameters + + ## Contributors + + @alex-clawd, @github-actions[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide + + + ## v1.14.0a4 diff --git a/docs/ko/changelog.mdx b/docs/ko/changelog.mdx index 5c3a98abf..ad4a3db79 100644 --- a/docs/ko/changelog.mdx +++ b/docs/ko/changelog.mdx @@ -4,6 +4,44 @@ description: "CrewAI의 제품 업데이트, 개선 사항 및 버그 수정" icon: "clock" mode: "wide" --- + + ## v1.14.0 + + [GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0) + + ## 변경 사항 + + ### 
기능 + - 체크포인트 목록/정보 CLI 명령 추가 + - 추적을 구분하기 위한 guardrail_type 및 이름 추가 + - 체크포인트 저장을 위한 SqliteProvider 추가 + - 자동 체크포인트 생성을 위한 CheckpointConfig 추가 + - 런타임 상태 체크포인트, 이벤트 시스템 및 실행기 리팩토링 구현 + + ### 버그 수정 + - SSRF 및 경로 탐색 보호 추가 + - RAG 도구에 경로 및 URL 유효성 검사 추가 + - 토큰 절약을 위해 메모리 직렬화에서 임베딩 벡터 제외 + - 흐름 템플릿에 쓰기 전에 출력 디렉토리가 존재하는지 확인 + - CVE-2026-35030 문제를 해결하기 위해 litellm을 >=1.83.0으로 업데이트 + - 아랍어 페이지 렌더링을 유발하는 SEO 인덱싱 필드 제거 + + ### 문서 + - v1.14.0에 대한 변경 로그 및 버전 업데이트 + - 명확성을 개선하기 위해 빠른 시작 및 설치 가이드 업데이트 + - 저장소 제공자 섹션 추가, JsonProvider 내보내기 + - AMP 교육 탭 가이드 추가 + + ### 리팩토링 + - 체크포인트 API 정리 + - CodeInterpreterTool 제거 및 코드 실행 매개변수 사용 중단 + + ## 기여자 + + @alex-clawd, @github-actions[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide + + + ## v1.14.0a4 diff --git a/docs/pt-BR/changelog.mdx b/docs/pt-BR/changelog.mdx index b6cd3aa42..febf0d886 100644 --- a/docs/pt-BR/changelog.mdx +++ b/docs/pt-BR/changelog.mdx @@ -4,6 +4,44 @@ description: "Atualizações de produto, melhorias e correções do CrewAI" icon: "clock" mode: "wide" --- + + ## v1.14.0 + + [Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0) + + ## O que Mudou + + ### Recursos + - Adicionar comandos CLI de lista/informações de checkpoint + - Adicionar guardrail_type e nome para distinguir rastros + - Adicionar SqliteProvider para armazenamento de checkpoints + - Adicionar CheckpointConfig para checkpointing automático + - Implementar checkpointing de estado em tempo de execução, sistema de eventos e refatoração do executor + + ### Correções de Bugs + - Adicionar proteções contra SSRF e travessia de caminho + - Adicionar validação de caminho e URL às ferramentas RAG + - Excluir vetores de incorporação da serialização de memória para economizar tokens + - Garantir que o diretório de saída exista antes de escrever no modelo de fluxo + - Atualizar litellm para >=1.83.0 para resolver CVE-2026-35030 + - Remover campo de indexação SEO que causava renderização de página em árabe + + ### 
Documentação + - Atualizar changelog e versão para v1.14.0 + - Atualizar guias de início rápido e instalação para maior clareza + - Adicionar seção de provedores de armazenamento, exportar JsonProvider + - Adicionar guia da aba de Treinamento AMP + + ### Refatoração + - Limpar API de checkpoint + - Remover CodeInterpreterTool e descontinuar parâmetros de execução de código + + ## Contribuidores + + @alex-clawd, @github-actions[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide + + + ## v1.14.0a4 From c0f3151e1329dc16284eff76c6b5a72f8ed01b16 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Wed, 8 Apr 2026 02:11:34 +0800 Subject: [PATCH 17/21] fix: register checkpoint handlers when CheckpointConfig is created --- .../crewai/agents/agent_builder/base_agent.py | 7 ++++-- lib/crewai/src/crewai/crew.py | 7 ++++-- lib/crewai/src/crewai/flow/flow.py | 7 ++++-- .../src/crewai/state/checkpoint_config.py | 25 +++++++++++++++++-- 4 files changed, 38 insertions(+), 8 deletions(-) diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py index dbff05e4d..de9379d09 100644 --- a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py +++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py @@ -39,7 +39,7 @@ from crewai.memory.unified_memory import Memory from crewai.rag.embeddings.types import EmbedderConfig from crewai.security.security_config import SecurityConfig from crewai.skills.models import Skill -from crewai.state.checkpoint_config import CheckpointConfig +from crewai.state.checkpoint_config import CheckpointConfig, _coerce_checkpoint from crewai.tools.base_tool import BaseTool, Tool from crewai.types.callback import SerializableCallable from crewai.utilities.config import process_config @@ -300,7 +300,10 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta): default_factory=SecurityConfig, description="Security configuration for the agent, including 
fingerprinting.", ) - checkpoint: CheckpointConfig | bool | None = Field( + checkpoint: Annotated[ + CheckpointConfig | bool | None, + BeforeValidator(_coerce_checkpoint), + ] = Field( default=None, description="Automatic checkpointing configuration. " "True for defaults, False to opt out, None to inherit.", diff --git a/lib/crewai/src/crewai/crew.py b/lib/crewai/src/crewai/crew.py index 4f9ebab5d..e630ec5b0 100644 --- a/lib/crewai/src/crewai/crew.py +++ b/lib/crewai/src/crewai/crew.py @@ -104,7 +104,7 @@ from crewai.rag.types import SearchResult from crewai.security.fingerprint import Fingerprint from crewai.security.security_config import SecurityConfig from crewai.skills.models import Skill -from crewai.state.checkpoint_config import CheckpointConfig +from crewai.state.checkpoint_config import CheckpointConfig, _coerce_checkpoint from crewai.task import Task from crewai.tasks.conditional_task import ConditionalTask from crewai.tasks.task_output import TaskOutput @@ -341,7 +341,10 @@ class Crew(FlowTrackable, BaseModel): default_factory=SecurityConfig, description="Security configuration for the crew, including fingerprinting.", ) - checkpoint: CheckpointConfig | bool | None = Field( + checkpoint: Annotated[ + CheckpointConfig | bool | None, + BeforeValidator(_coerce_checkpoint), + ] = Field( default=None, description="Automatic checkpointing configuration. 
" "True for defaults, False to opt out, None to inherit.", diff --git a/lib/crewai/src/crewai/flow/flow.py b/lib/crewai/src/crewai/flow/flow.py index 76a96b3f9..60d03b069 100644 --- a/lib/crewai/src/crewai/flow/flow.py +++ b/lib/crewai/src/crewai/flow/flow.py @@ -113,7 +113,7 @@ from crewai.flow.utils import ( ) from crewai.memory.memory_scope import MemoryScope, MemorySlice from crewai.memory.unified_memory import Memory -from crewai.state.checkpoint_config import CheckpointConfig +from crewai.state.checkpoint_config import CheckpointConfig, _coerce_checkpoint if TYPE_CHECKING: @@ -921,7 +921,10 @@ class Flow(BaseModel, Generic[T], metaclass=FlowMeta): max_method_calls: int = Field(default=100) execution_context: ExecutionContext | None = Field(default=None) - checkpoint: CheckpointConfig | bool | None = Field(default=None) + checkpoint: Annotated[ + CheckpointConfig | bool | None, + BeforeValidator(_coerce_checkpoint), + ] = Field(default=None) @classmethod def from_checkpoint( diff --git a/lib/crewai/src/crewai/state/checkpoint_config.py b/lib/crewai/src/crewai/state/checkpoint_config.py index 4c5499ff4..84c48bd4e 100644 --- a/lib/crewai/src/crewai/state/checkpoint_config.py +++ b/lib/crewai/src/crewai/state/checkpoint_config.py @@ -2,9 +2,9 @@ from __future__ import annotations -from typing import Literal +from typing import Any, Literal -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, model_validator from crewai.state.provider.core import BaseProvider from crewai.state.provider.json_provider import JsonProvider @@ -158,6 +158,20 @@ CheckpointEventType = Literal[ ] +def _coerce_checkpoint(v: Any) -> Any: + """BeforeValidator for checkpoint fields on Crew/Flow/Agent. + + Converts True to CheckpointConfig and triggers handler registration. 
+ """ + if v is True: + v = CheckpointConfig() + if isinstance(v, CheckpointConfig): + from crewai.state.checkpoint_listener import _ensure_handlers_registered + + _ensure_handlers_registered() + return v + + class CheckpointConfig(BaseModel): """Configuration for automatic checkpointing. @@ -185,6 +199,13 @@ class CheckpointConfig(BaseModel): "each write. None means keep all.", ) + @model_validator(mode="after") + def _register_handlers(self) -> CheckpointConfig: + from crewai.state.checkpoint_listener import _ensure_handlers_registered + + _ensure_handlers_registered() + return self + @property def trigger_all(self) -> bool: return "*" in self.on_events From 75f162fd3c105623b9d9010a15268cbe6157cce6 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Wed, 8 Apr 2026 03:14:54 +0800 Subject: [PATCH 18/21] refactor: make BaseProvider a BaseModel with provider_type discriminator Replace the Protocol with a BaseModel + ABC so providers serialize and deserialize natively via pydantic. Each provider gets a Literal provider_type field. CheckpointConfig.provider uses a discriminated union so the correct provider class is reconstructed from checkpoint JSON. 
--- .../src/crewai/state/checkpoint_config.py | 9 +++-- lib/crewai/src/crewai/state/provider/core.py | 35 ++++++------------- .../crewai/state/provider/json_provider.py | 3 ++ .../crewai/state/provider/sqlite_provider.py | 3 ++ 4 files changed, 23 insertions(+), 27 deletions(-) diff --git a/lib/crewai/src/crewai/state/checkpoint_config.py b/lib/crewai/src/crewai/state/checkpoint_config.py index 84c48bd4e..38c6b0490 100644 --- a/lib/crewai/src/crewai/state/checkpoint_config.py +++ b/lib/crewai/src/crewai/state/checkpoint_config.py @@ -2,12 +2,12 @@ from __future__ import annotations -from typing import Any, Literal +from typing import Annotated, Any, Literal from pydantic import BaseModel, Field, model_validator -from crewai.state.provider.core import BaseProvider from crewai.state.provider.json_provider import JsonProvider +from crewai.state.provider.sqlite_provider import SqliteProvider CheckpointEventType = Literal[ @@ -189,7 +189,10 @@ class CheckpointConfig(BaseModel): description="Event types that trigger a checkpoint write. " 'Use ["*"] to checkpoint on every event.', ) - provider: BaseProvider = Field( + provider: Annotated[ + JsonProvider | SqliteProvider, + Field(discriminator="provider_type"), + ] = Field( default_factory=JsonProvider, description="Storage backend. 
Defaults to JsonProvider.", ) diff --git a/lib/crewai/src/crewai/state/provider/core.py b/lib/crewai/src/crewai/state/provider/core.py index 46f079444..0b12364c0 100644 --- a/lib/crewai/src/crewai/state/provider/core.py +++ b/lib/crewai/src/crewai/state/provider/core.py @@ -1,39 +1,22 @@ -"""Base protocol for state providers.""" +"""Base class for state providers.""" from __future__ import annotations -from typing import Any, Protocol, runtime_checkable +from abc import ABC, abstractmethod -from pydantic import GetCoreSchemaHandler -from pydantic_core import CoreSchema, core_schema +from pydantic import BaseModel -@runtime_checkable -class BaseProvider(Protocol): - """Interface for persisting and restoring runtime state checkpoints. +class BaseProvider(BaseModel, ABC): + """Base class for persisting and restoring runtime state checkpoints. Implementations handle the storage backend — filesystem, cloud, database, etc. — while ``RuntimeState`` handles serialization. """ - @classmethod - def __get_pydantic_core_schema__( - cls, source_type: Any, handler: GetCoreSchemaHandler - ) -> CoreSchema: - """Allow Pydantic to validate any ``BaseProvider`` instance.""" - - def _validate(v: Any) -> BaseProvider: - if isinstance(v, BaseProvider): - return v - raise TypeError(f"Expected a BaseProvider instance, got {type(v)}") - - return core_schema.no_info_plain_validator_function( - _validate, - serialization=core_schema.plain_serializer_function_ser_schema( - lambda v: type(v).__name__, info_arg=False - ), - ) + provider_type: str = "base" + @abstractmethod def checkpoint(self, data: str, location: str) -> str: """Persist a snapshot synchronously. @@ -46,6 +29,7 @@ class BaseProvider(Protocol): """ ... + @abstractmethod async def acheckpoint(self, data: str, location: str) -> str: """Persist a snapshot asynchronously. @@ -58,6 +42,7 @@ class BaseProvider(Protocol): """ ... 
+ @abstractmethod def prune(self, location: str, max_keep: int) -> None: """Remove old checkpoints, keeping at most *max_keep*. @@ -67,6 +52,7 @@ class BaseProvider(Protocol): """ ... + @abstractmethod def from_checkpoint(self, location: str) -> str: """Read a snapshot synchronously. @@ -78,6 +64,7 @@ class BaseProvider(Protocol): """ ... + @abstractmethod async def afrom_checkpoint(self, location: str) -> str: """Read a snapshot asynchronously. diff --git a/lib/crewai/src/crewai/state/provider/json_provider.py b/lib/crewai/src/crewai/state/provider/json_provider.py index d2ac75d9c..f9763e6f3 100644 --- a/lib/crewai/src/crewai/state/provider/json_provider.py +++ b/lib/crewai/src/crewai/state/provider/json_provider.py @@ -7,6 +7,7 @@ import glob import logging import os from pathlib import Path +from typing import Literal import uuid import aiofiles @@ -21,6 +22,8 @@ logger = logging.getLogger(__name__) class JsonProvider(BaseProvider): """Persists runtime state checkpoints as JSON files on the local filesystem.""" + provider_type: Literal["json"] = "json" + def checkpoint(self, data: str, location: str) -> str: """Write a JSON checkpoint file. diff --git a/lib/crewai/src/crewai/state/provider/sqlite_provider.py b/lib/crewai/src/crewai/state/provider/sqlite_provider.py index ae014dda3..e54f56180 100644 --- a/lib/crewai/src/crewai/state/provider/sqlite_provider.py +++ b/lib/crewai/src/crewai/state/provider/sqlite_provider.py @@ -5,6 +5,7 @@ from __future__ import annotations from datetime import datetime, timezone from pathlib import Path import sqlite3 +from typing import Literal import uuid import aiosqlite @@ -47,6 +48,8 @@ class SqliteProvider(BaseProvider): used as the database file path. """ + provider_type: Literal["sqlite"] = "sqlite" + def checkpoint(self, data: str, location: str) -> str: """Write a checkpoint to the SQLite database. 
From 8700e3db33fd860e2440978aa2f054636620ef70 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Wed, 8 Apr 2026 04:37:31 +0800 Subject: [PATCH 19/21] chore: remove unused flow/config.py --- lib/crewai/src/crewai/flow/config.py | 133 --------------------------- 1 file changed, 133 deletions(-) delete mode 100644 lib/crewai/src/crewai/flow/config.py diff --git a/lib/crewai/src/crewai/flow/config.py b/lib/crewai/src/crewai/flow/config.py deleted file mode 100644 index 021cb65bb..000000000 --- a/lib/crewai/src/crewai/flow/config.py +++ /dev/null @@ -1,133 +0,0 @@ -from typing import Any, Literal, TypedDict - -from typing_extensions import NotRequired - - -DarkGray = Literal["#333333"] -CrewAIOrange = Literal["#FF5A50"] -Gray = Literal["#666666"] -White = Literal["#FFFFFF"] -Black = Literal["#000000"] - - -DARK_GRAY: Literal["#333333"] = "#333333" -CREWAI_ORANGE: Literal["#FF5A50"] = "#FF5A50" -GRAY: Literal["#666666"] = "#666666" -WHITE: Literal["#FFFFFF"] = "#FFFFFF" -BLACK: Literal["#000000"] = "#000000" - - -class FlowColors(TypedDict): - bg: White - start: CrewAIOrange - method: DarkGray - router: DarkGray - router_border: CrewAIOrange - edge: Gray - router_edge: CrewAIOrange - text: White - - -class FontStyles(TypedDict, total=False): - color: DarkGray | CrewAIOrange | Gray | White | Black - multi: Literal["html"] - - -class StartNodeStyle(TypedDict): - color: CrewAIOrange - shape: Literal["box"] - font: FontStyles - label: NotRequired[str] - margin: dict[str, int] - - -class MethodNodeStyle(TypedDict): - color: DarkGray - shape: Literal["box"] - font: FontStyles - label: NotRequired[str] - margin: dict[str, int] - - -class RouterNodeStyle(TypedDict): - color: dict[str, Any] - shape: Literal["box"] - font: FontStyles - label: NotRequired[str] - borderWidth: int - borderWidthSelected: int - shapeProperties: dict[str, list[int] | bool] - margin: dict[str, int] - - -class CrewNodeStyle(TypedDict): - color: dict[str, CrewAIOrange | White] - shape: Literal["box"] - 
font: FontStyles - label: NotRequired[str] - borderWidth: int - borderWidthSelected: int - shapeProperties: dict[str, bool] - margin: dict[str, int] - - -class NodeStyles(TypedDict): - start: StartNodeStyle - method: MethodNodeStyle - router: RouterNodeStyle - crew: CrewNodeStyle - - -COLORS: FlowColors = { - "bg": WHITE, - "start": CREWAI_ORANGE, - "method": DARK_GRAY, - "router": DARK_GRAY, - "router_border": CREWAI_ORANGE, - "edge": GRAY, - "router_edge": CREWAI_ORANGE, - "text": WHITE, -} - -NODE_STYLES: NodeStyles = { - "start": { - "color": CREWAI_ORANGE, - "shape": "box", - "font": {"color": WHITE}, - "margin": {"top": 10, "bottom": 8, "left": 10, "right": 10}, - }, - "method": { - "color": DARK_GRAY, - "shape": "box", - "font": {"color": WHITE}, - "margin": {"top": 10, "bottom": 8, "left": 10, "right": 10}, - }, - "router": { - "color": { - "background": DARK_GRAY, - "border": CREWAI_ORANGE, - "highlight": { - "border": CREWAI_ORANGE, - "background": DARK_GRAY, - }, - }, - "shape": "box", - "font": {"color": WHITE}, - "borderWidth": 3, - "borderWidthSelected": 4, - "shapeProperties": {"borderDashes": [5, 5]}, - "margin": {"top": 10, "bottom": 8, "left": 10, "right": 10}, - }, - "crew": { - "color": { - "background": WHITE, - "border": CREWAI_ORANGE, - }, - "shape": "box", - "font": {"color": BLACK}, - "borderWidth": 3, - "borderWidthSelected": 4, - "shapeProperties": {"borderDashes": False}, - "margin": {"top": 10, "bottom": 8, "left": 10, "right": 10}, - }, -} From b23b2696fe0572d4db1e8bf4e03156d6189f5db6 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Wed, 8 Apr 2026 04:58:05 +0800 Subject: [PATCH 20/21] fix: remove FilteredStream stdout/stderr wrapper Wrapping sys.stdout and sys.stderr at import time with a threading.Lock is not fork-safe and adds overhead to every print call. litellm.suppress_debug_info already silences the noisy output this was designed to filter. 
--- lib/crewai/src/crewai/llm.py | 70 ------------------------------------ 1 file changed, 70 deletions(-) diff --git a/lib/crewai/src/crewai/llm.py b/lib/crewai/src/crewai/llm.py index 192fffd1a..e6f5cc68b 100644 --- a/lib/crewai/src/crewai/llm.py +++ b/lib/crewai/src/crewai/llm.py @@ -3,18 +3,14 @@ from __future__ import annotations from collections import defaultdict from collections.abc import Callable from datetime import datetime -import io import json import logging import os -import sys -import threading from typing import ( TYPE_CHECKING, Any, Final, Literal, - TextIO, TypedDict, cast, ) @@ -102,72 +98,6 @@ if LITELLM_AVAILABLE: litellm.suppress_debug_info = True -class FilteredStream(io.TextIOBase): - _lock = None - - def __init__(self, original_stream: TextIO): - self._original_stream = original_stream - self._lock = threading.Lock() - - def write(self, s: str) -> int: - if not self._lock: - self._lock = threading.Lock() - - with self._lock: - lower_s = s.lower() - - # Skip common noisy LiteLLM banners and any other lines that contain "litellm" - if ( - "litellm.info:" in lower_s - or "Consider using a smaller input or implementing a text splitting strategy" - in lower_s - ): - return 0 - - return self._original_stream.write(s) - - def flush(self) -> None: - if self._lock: - with self._lock: - return self._original_stream.flush() - return None - - def __getattr__(self, name: str) -> Any: - """Delegate attribute access to the wrapped original stream. - - This ensures compatibility with libraries (e.g., Rich) that rely on - attributes such as `encoding`, `isatty`, `buffer`, etc., which may not - be explicitly defined on this proxy class. - """ - return getattr(self._original_stream, name) - - # Delegate common properties/methods explicitly so they aren't shadowed by - # the TextIOBase defaults (e.g., .encoding returns None by default, which - # confuses Rich). These explicit pass-throughs ensure the wrapped Console - # still sees a fully-featured stream. 
- @property - def encoding(self) -> str | Any: # type: ignore[override] - return getattr(self._original_stream, "encoding", "utf-8") - - def isatty(self) -> bool: - return self._original_stream.isatty() - - def fileno(self) -> int: - return self._original_stream.fileno() - - def writable(self) -> bool: - return True - - -# Apply the filtered stream globally so that any subsequent writes containing the filtered -# keywords (e.g., "litellm") are hidden from terminal output. We guard against double -# wrapping to ensure idempotency in environments where this module might be reloaded. -if not isinstance(sys.stdout, FilteredStream): - sys.stdout = FilteredStream(sys.stdout) -if not isinstance(sys.stderr, FilteredStream): - sys.stderr = FilteredStream(sys.stderr) - - MIN_CONTEXT: Final[int] = 1024 MAX_CONTEXT: Final[int] = 2097152 # Current max from gemini-1.5-pro ANTHROPIC_PREFIXES: Final[tuple[str, str, str]] = ("anthropic/", "claude-", "claude/") From 0450d06a6513828210bfb9fdefb6f7de98a7bcc7 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Wed, 8 Apr 2026 07:17:22 +0800 Subject: [PATCH 21/21] refactor: use shared PRINTER singleton --- .../agent_builder/base_agent_executor.py | 2 - .../src/crewai/agents/crew_agent_executor.py | 59 ++++++------- lib/crewai/src/crewai/agents/step_executor.py | 5 +- lib/crewai/src/crewai/cli/add_crew_to_flow.py | 9 +- lib/crewai/src/crewai/cli/crew_chat.py | 8 +- .../src/crewai/experimental/agent_executor.py | 85 +++++++++---------- .../src/crewai/flow/persistence/decorators.py | 14 ++- lib/crewai/src/crewai/flow/utils.py | 22 +++-- lib/crewai/src/crewai/hooks/llm_hooks.py | 9 +- lib/crewai/src/crewai/hooks/tool_hooks.py | 9 +- lib/crewai/src/crewai/lite_agent.py | 25 +++--- lib/crewai/src/crewai/llms/base_llm.py | 12 ++- .../storage/kickoff_task_outputs_storage.py | 2 - .../providers/ibm/embedding_callable.py | 7 +- lib/crewai/src/crewai/task.py | 13 +-- lib/crewai/src/crewai/tools/base_tool.py | 3 - 
lib/crewai/src/crewai/tools/tool_usage.py | 39 ++++----- .../src/crewai/utilities/agent_utils.py | 6 +- lib/crewai/src/crewai/utilities/converter.py | 8 +- lib/crewai/src/crewai/utilities/logger.py | 7 +- lib/crewai/src/crewai/utilities/printer.py | 3 + .../tests/agents/test_agent_executor.py | 4 - lib/crewai/tests/agents/test_lite_agent.py | 22 +---- lib/crewai/tests/tools/test_tool_usage.py | 5 -- lib/crewai/tests/utilities/test_converter.py | 4 +- 25 files changed, 161 insertions(+), 221 deletions(-) diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor.py index ad56807e4..a44b81fc3 100644 --- a/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor.py +++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor.py @@ -6,7 +6,6 @@ from pydantic import BaseModel, Field, PrivateAttr from crewai.agents.parser import AgentFinish from crewai.memory.utils import sanitize_scope_name -from crewai.utilities.printer import Printer from crewai.utilities.string_utils import sanitize_tool_name from crewai.utilities.types import LLMMessage @@ -30,7 +29,6 @@ class BaseAgentExecutor(BaseModel): messages: list[LLMMessage] = Field(default_factory=list) _resuming: bool = PrivateAttr(default=False) _i18n: I18N | None = PrivateAttr(default=None) - _printer: Printer = PrivateAttr(default_factory=Printer) def _save_to_memory(self, output: AgentFinish) -> None: """Save task result to unified memory (memory or crew._memory).""" diff --git a/lib/crewai/src/crewai/agents/crew_agent_executor.py b/lib/crewai/src/crewai/agents/crew_agent_executor.py index 0a002ed8e..6307d5b9c 100644 --- a/lib/crewai/src/crewai/agents/crew_agent_executor.py +++ b/lib/crewai/src/crewai/agents/crew_agent_executor.py @@ -68,6 +68,7 @@ from crewai.utilities.agent_utils import ( from crewai.utilities.constants import TRAINING_DATA_FILE from crewai.utilities.file_store import aget_all_files, get_all_files from 
crewai.utilities.i18n import I18N, get_i18n +from crewai.utilities.printer import PRINTER from crewai.utilities.string_utils import sanitize_tool_name from crewai.utilities.token_counter_callback import TokenCalcHandler from crewai.utilities.tool_utils import ( @@ -212,13 +213,13 @@ class CrewAgentExecutor(BaseAgentExecutor): formatted_answer = self._invoke_loop() except AssertionError: if self.agent.verbose: - self._printer.print( + PRINTER.print( content="Agent failed to reach a final answer. This is likely a bug - please report it.", color="red", ) raise except Exception as e: - handle_unknown_error(self._printer, e, verbose=self.agent.verbose) + handle_unknown_error(PRINTER, e, verbose=self.agent.verbose) raise if self.ask_for_human_input: @@ -326,7 +327,7 @@ class CrewAgentExecutor(BaseAgentExecutor): if has_reached_max_iterations(self.iterations, self.max_iter): formatted_answer = handle_max_iterations_exceeded( formatted_answer, - printer=self._printer, + printer=PRINTER, i18n=self._i18n, messages=self.messages, llm=cast("BaseLLM", self.llm), @@ -341,7 +342,7 @@ class CrewAgentExecutor(BaseAgentExecutor): llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, - printer=self._printer, + printer=PRINTER, from_task=self.task, from_agent=self.agent, response_model=self.response_model, @@ -422,7 +423,7 @@ class CrewAgentExecutor(BaseAgentExecutor): messages=self.messages, iterations=self.iterations, log_error_after=self.log_error_after, - printer=self._printer, + printer=PRINTER, verbose=self.agent.verbose, ) @@ -433,7 +434,7 @@ class CrewAgentExecutor(BaseAgentExecutor): if is_context_length_exceeded(e): handle_context_length( respect_context_window=self.respect_context_window, - printer=self._printer, + printer=PRINTER, messages=self.messages, llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, @@ -441,7 +442,7 @@ class CrewAgentExecutor(BaseAgentExecutor): verbose=self.agent.verbose, ) continue - 
handle_unknown_error(self._printer, e, verbose=self.agent.verbose) + handle_unknown_error(PRINTER, e, verbose=self.agent.verbose) raise e finally: self.iterations += 1 @@ -482,7 +483,7 @@ class CrewAgentExecutor(BaseAgentExecutor): if has_reached_max_iterations(self.iterations, self.max_iter): formatted_answer = handle_max_iterations_exceeded( None, - printer=self._printer, + printer=PRINTER, i18n=self._i18n, messages=self.messages, llm=cast("BaseLLM", self.llm), @@ -502,7 +503,7 @@ class CrewAgentExecutor(BaseAgentExecutor): llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, - printer=self._printer, + printer=PRINTER, tools=openai_tools, available_functions=None, from_task=self.task, @@ -570,7 +571,7 @@ class CrewAgentExecutor(BaseAgentExecutor): if is_context_length_exceeded(e): handle_context_length( respect_context_window=self.respect_context_window, - printer=self._printer, + printer=PRINTER, messages=self.messages, llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, @@ -578,7 +579,7 @@ class CrewAgentExecutor(BaseAgentExecutor): verbose=self.agent.verbose, ) continue - handle_unknown_error(self._printer, e, verbose=self.agent.verbose) + handle_unknown_error(PRINTER, e, verbose=self.agent.verbose) raise e finally: self.iterations += 1 @@ -595,7 +596,7 @@ class CrewAgentExecutor(BaseAgentExecutor): llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, - printer=self._printer, + printer=PRINTER, from_task=self.task, from_agent=self.agent, response_model=self.response_model, @@ -965,7 +966,7 @@ class CrewAgentExecutor(BaseAgentExecutor): break except Exception as hook_error: if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"Error in before_tool_call hook: {hook_error}", color="red", ) @@ -1031,7 +1032,7 @@ class CrewAgentExecutor(BaseAgentExecutor): after_hook_context.tool_result = result except Exception as hook_error: if self.agent.verbose: - self._printer.print( + 
PRINTER.print( content=f"Error in after_tool_call hook: {hook_error}", color="red", ) @@ -1078,7 +1079,7 @@ class CrewAgentExecutor(BaseAgentExecutor): if self.agent and self.agent.verbose: cache_info = " (from cache)" if from_cache else "" - self._printer.print( + PRINTER.print( content=f"Tool {func_name} executed with result{cache_info}: {result[:200]}...", color="green", ) @@ -1118,13 +1119,13 @@ class CrewAgentExecutor(BaseAgentExecutor): formatted_answer = await self._ainvoke_loop() except AssertionError: if self.agent.verbose: - self._printer.print( + PRINTER.print( content="Agent failed to reach a final answer. This is likely a bug - please report it.", color="red", ) raise except Exception as e: - handle_unknown_error(self._printer, e, verbose=self.agent.verbose) + handle_unknown_error(PRINTER, e, verbose=self.agent.verbose) raise if self.ask_for_human_input: @@ -1168,7 +1169,7 @@ class CrewAgentExecutor(BaseAgentExecutor): if has_reached_max_iterations(self.iterations, self.max_iter): formatted_answer = handle_max_iterations_exceeded( formatted_answer, - printer=self._printer, + printer=PRINTER, i18n=self._i18n, messages=self.messages, llm=cast("BaseLLM", self.llm), @@ -1183,7 +1184,7 @@ class CrewAgentExecutor(BaseAgentExecutor): llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, - printer=self._printer, + printer=PRINTER, from_task=self.task, from_agent=self.agent, response_model=self.response_model, @@ -1263,7 +1264,7 @@ class CrewAgentExecutor(BaseAgentExecutor): messages=self.messages, iterations=self.iterations, log_error_after=self.log_error_after, - printer=self._printer, + printer=PRINTER, verbose=self.agent.verbose, ) @@ -1273,7 +1274,7 @@ class CrewAgentExecutor(BaseAgentExecutor): if is_context_length_exceeded(e): handle_context_length( respect_context_window=self.respect_context_window, - printer=self._printer, + printer=PRINTER, messages=self.messages, llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, @@ 
-1281,7 +1282,7 @@ class CrewAgentExecutor(BaseAgentExecutor): verbose=self.agent.verbose, ) continue - handle_unknown_error(self._printer, e, verbose=self.agent.verbose) + handle_unknown_error(PRINTER, e, verbose=self.agent.verbose) raise e finally: self.iterations += 1 @@ -1316,7 +1317,7 @@ class CrewAgentExecutor(BaseAgentExecutor): if has_reached_max_iterations(self.iterations, self.max_iter): formatted_answer = handle_max_iterations_exceeded( None, - printer=self._printer, + printer=PRINTER, i18n=self._i18n, messages=self.messages, llm=cast("BaseLLM", self.llm), @@ -1336,7 +1337,7 @@ class CrewAgentExecutor(BaseAgentExecutor): llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, - printer=self._printer, + printer=PRINTER, tools=openai_tools, available_functions=None, from_task=self.task, @@ -1403,7 +1404,7 @@ class CrewAgentExecutor(BaseAgentExecutor): if is_context_length_exceeded(e): handle_context_length( respect_context_window=self.respect_context_window, - printer=self._printer, + printer=PRINTER, messages=self.messages, llm=cast("BaseLLM", self.llm), callbacks=self.callbacks, @@ -1411,7 +1412,7 @@ class CrewAgentExecutor(BaseAgentExecutor): verbose=self.agent.verbose, ) continue - handle_unknown_error(self._printer, e, verbose=self.agent.verbose) + handle_unknown_error(PRINTER, e, verbose=self.agent.verbose) raise e finally: self.iterations += 1 @@ -1428,7 +1429,7 @@ class CrewAgentExecutor(BaseAgentExecutor): llm=cast("BaseLLM", self.llm), messages=self.messages, callbacks=self.callbacks, - printer=self._printer, + printer=PRINTER, from_task=self.task, from_agent=self.agent, response_model=self.response_model, @@ -1576,7 +1577,7 @@ class CrewAgentExecutor(BaseAgentExecutor): if train_iteration is None or not isinstance(train_iteration, int): if self.agent.verbose: - self._printer.print( + PRINTER.print( content="Invalid or missing train iteration. 
Cannot save training data.", color="red", ) @@ -1600,7 +1601,7 @@ class CrewAgentExecutor(BaseAgentExecutor): agent_training_data[train_iteration]["improved_output"] = result.output else: if self.agent.verbose: - self._printer.print( + PRINTER.print( content=( f"No existing training data for agent {agent_id} and iteration " f"{train_iteration}. Cannot save improved output." diff --git a/lib/crewai/src/crewai/agents/step_executor.py b/lib/crewai/src/crewai/agents/step_executor.py index 29836497c..48592efb4 100644 --- a/lib/crewai/src/crewai/agents/step_executor.py +++ b/lib/crewai/src/crewai/agents/step_executor.py @@ -40,7 +40,7 @@ from crewai.utilities.agent_utils import ( ) from crewai.utilities.i18n import I18N, get_i18n from crewai.utilities.planning_types import TodoItem -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER from crewai.utilities.step_execution_context import StepExecutionContext, StepResult from crewai.utilities.string_utils import sanitize_tool_name from crewai.utilities.tool_utils import execute_tool_and_check_finality @@ -109,7 +109,6 @@ class StepExecutor: self.request_within_rpm_limit = request_within_rpm_limit self.callbacks = callbacks or [] self._i18n: I18N = i18n or get_i18n() - self._printer: Printer = Printer() # Native tool support — set up once self._use_native_tools = check_native_tool_support( @@ -585,7 +584,7 @@ class StepExecutor: task=self.task, crew=self.crew, event_source=self, - printer=self._printer, + printer=PRINTER, verbose=bool(self.agent and self.agent.verbose), ) diff --git a/lib/crewai/src/crewai/cli/add_crew_to_flow.py b/lib/crewai/src/crewai/cli/add_crew_to_flow.py index a3e0f5209..c286b5010 100644 --- a/lib/crewai/src/crewai/cli/add_crew_to_flow.py +++ b/lib/crewai/src/crewai/cli/add_crew_to_flow.py @@ -3,17 +3,14 @@ from pathlib import Path import click from crewai.cli.utils import copy_template -from crewai.utilities.printer import Printer - - -_printer = Printer() +from 
crewai.utilities.printer import PRINTER def add_crew_to_flow(crew_name: str) -> None: """Add a new crew to the current flow.""" # Check if pyproject.toml exists in the current directory if not Path("pyproject.toml").exists(): - _printer.print( + PRINTER.print( "This command must be run from the root of a flow project.", color="red" ) raise click.ClickException( @@ -25,7 +22,7 @@ def add_crew_to_flow(crew_name: str) -> None: crews_folder = flow_folder / "src" / flow_folder.name / "crews" if not crews_folder.exists(): - _printer.print("Crews folder does not exist in the current flow.", color="red") + PRINTER.print("Crews folder does not exist in the current flow.", color="red") raise click.ClickException("Crews folder does not exist in the current flow.") # Create the crew within the flow's crews directory diff --git a/lib/crewai/src/crewai/cli/crew_chat.py b/lib/crewai/src/crewai/cli/crew_chat.py index bbbd51c0c..ad1c65894 100644 --- a/lib/crewai/src/crewai/cli/crew_chat.py +++ b/lib/crewai/src/crewai/cli/crew_chat.py @@ -19,12 +19,10 @@ from crewai.llm import LLM from crewai.llms.base_llm import BaseLLM from crewai.types.crew_chat import ChatInputField, ChatInputs from crewai.utilities.llm_utils import create_llm -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER from crewai.utilities.types import LLMMessage -_printer = Printer() - MIN_REQUIRED_VERSION: Final[Literal["0.98.0"]] = "0.98.0" @@ -121,9 +119,9 @@ def run_chat() -> None: def show_loading(event: threading.Event) -> None: """Display animated loading dots while processing.""" while not event.is_set(): - _printer.print(".", end="") + PRINTER.print(".", end="") time.sleep(1) - _printer.print("") + PRINTER.print("") def initialize_chat_llm(crew: Crew) -> LLM | BaseLLM | None: diff --git a/lib/crewai/src/crewai/experimental/agent_executor.py b/lib/crewai/src/crewai/experimental/agent_executor.py index 067489c8e..72b732766 100644 --- 
a/lib/crewai/src/crewai/experimental/agent_executor.py +++ b/lib/crewai/src/crewai/experimental/agent_executor.py @@ -98,7 +98,7 @@ from crewai.utilities.planning_types import ( TodoItem, TodoList, ) -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER from crewai.utilities.step_execution_context import StepExecutionContext, StepResult from crewai.utilities.string_utils import sanitize_tool_name from crewai.utilities.tool_utils import execute_tool_and_check_finality @@ -199,7 +199,6 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor ) _i18n: I18N = PrivateAttr(default_factory=get_i18n) - _printer: Printer = PrivateAttr(default_factory=Printer) _console: Console = PrivateAttr(default_factory=Console) _last_parser_error: OutputParserError | None = PrivateAttr(default=None) _last_context_error: Exception | None = PrivateAttr(default=None) @@ -503,7 +502,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor ) if self.agent.verbose: - self._printer.print( + PRINTER.print( content=( f"[Observe] Step {current_todo.step_number} " f"(effort={effort}): " @@ -553,7 +552,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor current_todo.step_number, result=current_todo.result ) if self.agent.verbose: - self._printer.print( + PRINTER.print( content=( f"[Low] Step {current_todo.step_number} hard-failed " f"— triggering replan: {observation.replan_reason}" @@ -572,7 +571,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor if self.agent.verbose: completed = self.state.todos.completed_count total = len(self.state.todos.items) - self._printer.print( + PRINTER.print( content=f"[Low] Step {current_todo.step_number} done ({completed}/{total}) — continuing", color="green", ) @@ -605,7 +604,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor if self.agent.verbose: completed = 
self.state.todos.completed_count total = len(self.state.todos.items) - self._printer.print( + PRINTER.print( content=f"[Medium] Step {current_todo.step_number} succeeded ({completed}/{total}) — continuing", color="green", ) @@ -618,7 +617,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor current_todo.step_number, result=current_todo.result ) if self.agent.verbose: - self._printer.print( + PRINTER.print( content=( f"[Medium] Step {current_todo.step_number} failed + replan required " f"— triggering replan: {observation.replan_reason}" @@ -638,7 +637,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor if self.agent.verbose: failed = len(self.state.todos.get_failed_todos()) total = len(self.state.todos.items) - self._printer.print( + PRINTER.print( content=( f"[Medium] Step {current_todo.step_number} failed but no replan needed " f"({failed} failed/{total} total) — continuing" @@ -680,7 +679,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor current_todo.step_number, result=current_todo.result ) if self.agent.verbose: - self._printer.print( + PRINTER.print( content="[Decide] Goal achieved early — finalizing", color="green", ) @@ -692,7 +691,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor current_todo.step_number, result=current_todo.result ) if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"[Decide] Full replan needed: {observation.replan_reason}", color="yellow", ) @@ -705,7 +704,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor current_todo.step_number, result=current_todo.result ) if self.agent.verbose: - self._printer.print( + PRINTER.print( content="[Decide] Step failed — triggering replan", color="yellow", ) @@ -718,7 +717,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor current_todo.step_number, result=current_todo.result ) if 
self.agent.verbose: - self._printer.print( + PRINTER.print( content="[Decide] Plan valid but refining upcoming steps", color="cyan", ) @@ -731,7 +730,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor if self.agent.verbose: completed = self.state.todos.completed_count total = len(self.state.todos.items) - self._printer.print( + PRINTER.print( content=f"[Decide] Continue plan ({completed}/{total} done)", color="green", ) @@ -776,7 +775,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor ) if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"[Refine] Updated {len(remaining)} pending step(s)", color="cyan", ) @@ -811,7 +810,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor ) if self.agent.verbose: - self._printer.print( + PRINTER.print( content="Goal achieved early — skipping remaining steps", color="green", ) @@ -829,7 +828,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor if self.state.replan_count >= max_replans: if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"Max replans ({max_replans}) reached — finalizing with current results", color="yellow", ) @@ -936,7 +935,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor # Plan-and-Execute path: use StepExecutor for isolated execution if getattr(self.agent, "planning_enabled", False): if self.agent.verbose: - self._printer.print( + PRINTER.print( content=( f"[Execute] Step {current.step_number}: " f"{current.description[:60]}..." 
@@ -971,7 +970,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor if self.agent.verbose: status = "success" if result.success else "failed" - self._printer.print( + PRINTER.print( content=( f"[Execute] Step {current.step_number} {status} " f"({result.execution_time:.1f}s, " @@ -1080,7 +1079,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor todo.result = error_msg self.state.todos.mark_failed(todo.step_number, result=error_msg) if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"Todo {todo.step_number} failed: {error_msg}", color="red", ) @@ -1105,7 +1104,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor if self.agent.verbose: status = "success" if step_result.success else "failed" - self._printer.print( + PRINTER.print( content=( f"[Execute] Step {todo.step_number} {status} " f"({step_result.execution_time:.1f}s, " @@ -1152,7 +1151,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor self.state.todos.mark_failed(todo.step_number, result=todo.result) if self.agent.verbose: - self._printer.print( + PRINTER.print( content=( f"[Observe] Step {todo.step_number} " f"(effort={effort}): " @@ -1203,7 +1202,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor """Force agent to provide final answer when max iterations exceeded.""" formatted_answer = handle_max_iterations_exceeded( formatted_answer=None, - printer=self._printer, + printer=PRINTER, i18n=self._i18n, messages=list(self.state.messages), llm=self.llm, @@ -1232,7 +1231,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor llm=self.llm, messages=list(self.state.messages), callbacks=self.callbacks, - printer=self._printer, + printer=PRINTER, from_task=self.task, from_agent=self.agent, response_model=self.response_model, @@ -1282,7 +1281,7 @@ class AgentExecutor(Flow[AgentExecutorState], 
BaseAgentExecutor): # type: ignor return "context_error" if e.__class__.__module__.startswith("litellm"): raise e - handle_unknown_error(self._printer, e, verbose=self.agent.verbose) + handle_unknown_error(PRINTER, e, verbose=self.agent.verbose) raise @router("continue_reasoning_native") @@ -1318,7 +1317,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor llm=self.llm, messages=list(self.state.messages), callbacks=self.callbacks, - printer=self._printer, + printer=PRINTER, tools=self._openai_tools, available_functions=None, from_task=self.task, @@ -1373,7 +1372,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor return "context_error" if e.__class__.__module__.startswith("litellm"): raise e - handle_unknown_error(self._printer, e, verbose=self.agent.verbose) + handle_unknown_error(PRINTER, e, verbose=self.agent.verbose) raise def _route_finish_with_todos( @@ -1442,9 +1441,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor ) except Exception as e: if self.agent and self.agent.verbose: - self._printer.print( - content=f"Error in tool execution: {e}", color="red" - ) + PRINTER.print(content=f"Error in tool execution: {e}", color="red") if self.task: self.task.increment_tools_errors() @@ -1598,7 +1595,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor # Log the tool execution if self.agent and self.agent.verbose: cache_info = " (from cache)" if from_cache else "" - self._printer.print( + PRINTER.print( content=f"Tool {func_name} executed with result{cache_info}: {result[:200]}...", color="green", ) @@ -1636,7 +1633,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor # Log the tool execution if self.agent and self.agent.verbose: cache_info = " (from cache)" if from_cache else "" - self._printer.print( + PRINTER.print( content=f"Tool {func_name} executed with result{cache_info}: {result[:200]}...", 
color="green", ) @@ -1800,7 +1797,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor break except Exception as hook_error: if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"Error in before_tool_call hook: {hook_error}", color="red", ) @@ -1875,7 +1872,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor after_hook_context.tool_result = result except Exception as hook_error: if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"Error in after_tool_call hook: {hook_error}", color="red", ) @@ -2033,7 +2030,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor if self.agent.verbose: completed = self.state.todos.completed_count total = len(self.state.todos.items) - self._printer.print( + PRINTER.print( content=f"✓ Todo {step_number} completed ({completed}/{total})", color="green", ) @@ -2100,7 +2097,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor self._finalize_called = True if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"[Finalize] todos_count={len(self.state.todos.items)}, todos_with_results={sum(1 for t in self.state.todos.items if t.result)}", color="magenta", ) @@ -2263,7 +2260,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor except Exception as e: if self.agent and self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"Synthesis LLM call failed ({e}), falling back to concatenation", color="yellow", ) @@ -2348,7 +2345,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor self.state.last_replan_reason = reason if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"Triggering replan (attempt {self.state.replan_count}): {reason}", color="yellow", ) @@ -2408,7 +2405,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor 
self.state.todos.replace_pending_todos(new_todos) if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"Replan: {len(new_todos)} new steps (completed history preserved)", color="green", ) @@ -2492,7 +2489,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor if self.state.replan_count >= max_replans: if self.agent.verbose: - self._printer.print( + PRINTER.print( content=f"Max replans ({max_replans}) reached — finalizing with current results", color="yellow", ) @@ -2518,7 +2515,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor messages=list(self.state.messages), iterations=self.state.iterations, log_error_after=self.log_error_after, - printer=self._printer, + printer=PRINTER, verbose=self.agent.verbose, ) @@ -2534,7 +2531,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor """Recover from context length errors and retry.""" handle_context_length( respect_context_window=self.respect_context_window, - printer=self._printer, + printer=PRINTER, messages=self.state.messages, llm=self.llm, callbacks=self.callbacks, @@ -2637,7 +2634,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor self._console.print(fail_text) raise except Exception as e: - handle_unknown_error(self._printer, e, verbose=self.agent.verbose) + handle_unknown_error(PRINTER, e, verbose=self.agent.verbose) raise finally: self._is_executing = False @@ -2728,7 +2725,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor self._console.print(fail_text) raise except Exception as e: - handle_unknown_error(self._printer, e, verbose=self.agent.verbose) + handle_unknown_error(PRINTER, e, verbose=self.agent.verbose) raise finally: self._is_executing = False @@ -2793,7 +2790,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor task.result() except Exception as e: if self.agent.verbose: - self._printer.print( 
+ PRINTER.print( content=f"Error in async step_callback task: {e!s}", color="red", ) diff --git a/lib/crewai/src/crewai/flow/persistence/decorators.py b/lib/crewai/src/crewai/flow/persistence/decorators.py index 20c860353..937b557f4 100644 --- a/lib/crewai/src/crewai/flow/persistence/decorators.py +++ b/lib/crewai/src/crewai/flow/persistence/decorators.py @@ -28,13 +28,13 @@ import asyncio from collections.abc import Callable import functools import logging -from typing import TYPE_CHECKING, Any, ClassVar, Final, TypeVar, cast +from typing import TYPE_CHECKING, Any, Final, TypeVar, cast from pydantic import BaseModel from crewai.flow.persistence.base import FlowPersistence from crewai.flow.persistence.sqlite import SQLiteFlowPersistence -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER if TYPE_CHECKING: @@ -56,8 +56,6 @@ LOG_MESSAGES: Final[dict[str, str]] = { class PersistenceDecorator: """Class to handle flow state persistence with consistent logging.""" - _printer: ClassVar[Printer] = Printer() - @classmethod def persist_state( cls, @@ -104,7 +102,7 @@ class PersistenceDecorator: # Log state saving only if verbose is True if verbose: - cls._printer.print( + PRINTER.print( LOG_MESSAGES["save_state"].format(flow_uuid), color="cyan" ) logger.info(LOG_MESSAGES["save_state"].format(flow_uuid)) @@ -119,19 +117,19 @@ class PersistenceDecorator: except Exception as e: error_msg = LOG_MESSAGES["save_error"].format(method_name, str(e)) if verbose: - cls._printer.print(error_msg, color="red") + PRINTER.print(error_msg, color="red") logger.error(error_msg) raise RuntimeError(f"State persistence failed: {e!s}") from e except AttributeError as e: error_msg = LOG_MESSAGES["state_missing"] if verbose: - cls._printer.print(error_msg, color="red") + PRINTER.print(error_msg, color="red") logger.error(error_msg) raise ValueError(error_msg) from e except (TypeError, ValueError) as e: error_msg = LOG_MESSAGES["id_missing"] if verbose: - 
cls._printer.print(error_msg, color="red") + PRINTER.print(error_msg, color="red") logger.error(error_msg) raise ValueError(error_msg) from e diff --git a/lib/crewai/src/crewai/flow/utils.py b/lib/crewai/src/crewai/flow/utils.py index 5dc812fc3..652a38f4c 100644 --- a/lib/crewai/src/crewai/flow/utils.py +++ b/lib/crewai/src/crewai/flow/utils.py @@ -32,14 +32,12 @@ from crewai.flow.flow_wrappers import ( SimpleFlowCondition, ) from crewai.flow.types import FlowMethodCallable, FlowMethodName -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER if TYPE_CHECKING: from crewai.flow.flow import Flow -_printer = Printer() - def _extract_string_literals_from_type_annotation( node: ast.expr, @@ -181,7 +179,7 @@ def get_possible_return_constants( return None except Exception as e: if verbose: - _printer.print( + PRINTER.print( f"Error retrieving source code for function {function.__name__}: {e}", color="red", ) @@ -194,27 +192,27 @@ def get_possible_return_constants( code_ast = ast.parse(source) except IndentationError as e: if verbose: - _printer.print( + PRINTER.print( f"IndentationError while parsing source code of {function.__name__}: {e}", color="red", ) - _printer.print(f"Source code:\n{source}", color="yellow") + PRINTER.print(f"Source code:\n{source}", color="yellow") return None except SyntaxError as e: if verbose: - _printer.print( + PRINTER.print( f"SyntaxError while parsing source code of {function.__name__}: {e}", color="red", ) - _printer.print(f"Source code:\n{source}", color="yellow") + PRINTER.print(f"Source code:\n{source}", color="yellow") return None except Exception as e: if verbose: - _printer.print( + PRINTER.print( f"Unexpected error while parsing source code of {function.__name__}: {e}", color="red", ) - _printer.print(f"Source code:\n{source}", color="yellow") + PRINTER.print(f"Source code:\n{source}", color="yellow") return None return_values: set[str] = set() @@ -395,13 +393,13 @@ def 
get_possible_return_constants( StateAttributeVisitor().visit(class_ast) except Exception as e: if verbose: - _printer.print( + PRINTER.print( f"Could not analyze class context for {function.__name__}: {e}", color="yellow", ) except Exception as e: if verbose: - _printer.print( + PRINTER.print( f"Could not introspect class for {function.__name__}: {e}", color="yellow", ) diff --git a/lib/crewai/src/crewai/hooks/llm_hooks.py b/lib/crewai/src/crewai/hooks/llm_hooks.py index 3a6abbedf..bc3d1d17d 100644 --- a/lib/crewai/src/crewai/hooks/llm_hooks.py +++ b/lib/crewai/src/crewai/hooks/llm_hooks.py @@ -9,7 +9,7 @@ from crewai.hooks.types import ( BeforeLLMCallHookCallable, BeforeLLMCallHookType, ) -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER if TYPE_CHECKING: @@ -138,16 +138,15 @@ class LLMCallHookContext: ... print("LLM call skipped by user") """ - printer = Printer() event_listener.formatter.pause_live_updates() try: - printer.print(content=f"\n{prompt}", color="bold_yellow") - printer.print(content=default_message, color="cyan") + PRINTER.print(content=f"\n{prompt}", color="bold_yellow") + PRINTER.print(content=default_message, color="cyan") response = input().strip() if response: - printer.print(content="\nProcessing your input...", color="cyan") + PRINTER.print(content="\nProcessing your input...", color="cyan") return response finally: diff --git a/lib/crewai/src/crewai/hooks/tool_hooks.py b/lib/crewai/src/crewai/hooks/tool_hooks.py index ac7f5c362..6d9c015b5 100644 --- a/lib/crewai/src/crewai/hooks/tool_hooks.py +++ b/lib/crewai/src/crewai/hooks/tool_hooks.py @@ -9,7 +9,7 @@ from crewai.hooks.types import ( BeforeToolCallHookCallable, BeforeToolCallHookType, ) -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER if TYPE_CHECKING: @@ -100,16 +100,15 @@ class ToolCallHookContext: ... 
return None # Allow execution """ - printer = Printer() event_listener.formatter.pause_live_updates() try: - printer.print(content=f"\n{prompt}", color="bold_yellow") - printer.print(content=default_message, color="cyan") + PRINTER.print(content=f"\n{prompt}", color="bold_yellow") + PRINTER.print(content=default_message, color="cyan") response = input().strip() if response: - printer.print(content="\nProcessing your input...", color="cyan") + PRINTER.print(content="\nProcessing your input...", color="cyan") return response finally: diff --git a/lib/crewai/src/crewai/lite_agent.py b/lib/crewai/src/crewai/lite_agent.py index 2bed7e92f..f96c84493 100644 --- a/lib/crewai/src/crewai/lite_agent.py +++ b/lib/crewai/src/crewai/lite_agent.py @@ -91,7 +91,7 @@ from crewai.utilities.guardrail import process_guardrail from crewai.utilities.guardrail_types import GuardrailCallable, GuardrailType from crewai.utilities.i18n import I18N, get_i18n from crewai.utilities.llm_utils import create_llm -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER from crewai.utilities.pydantic_schema_utils import generate_model_description from crewai.utilities.token_counter_callback import TokenCalcHandler from crewai.utilities.tool_utils import execute_tool_and_check_finality @@ -270,7 +270,6 @@ class LiteAgent(FlowTrackable, BaseModel): _key: str = PrivateAttr(default_factory=lambda: str(uuid.uuid4())) _messages: list[LLMMessage] = PrivateAttr(default_factory=list) _iterations: int = PrivateAttr(default=0) - _printer: Printer = PrivateAttr(default_factory=Printer) _guardrail: GuardrailCallable | None = PrivateAttr(default=None) _guardrail_retry_count: int = PrivateAttr(default=0) _callbacks: list[TokenCalcHandler] = PrivateAttr(default_factory=list) @@ -528,11 +527,11 @@ class LiteAgent(FlowTrackable, BaseModel): except Exception as e: if self.verbose: - self._printer.print( + PRINTER.print( content="Agent failed to reach a final answer. 
This is likely a bug - please report it.", color="red", ) - handle_unknown_error(self._printer, e, verbose=self.verbose) + handle_unknown_error(PRINTER, e, verbose=self.verbose) # Emit error event crewai_event_bus.emit( self, @@ -609,7 +608,7 @@ class LiteAgent(FlowTrackable, BaseModel): self._memory.remember_many(extracted, agent_role=self.role) except Exception as e: if self.verbose: - self._printer.print( + PRINTER.print( content=f"Failed to save to memory: {e}", color="yellow", ) @@ -661,7 +660,7 @@ class LiteAgent(FlowTrackable, BaseModel): formatted_result = result except ConverterError as e: if self.verbose: - self._printer.print( + PRINTER.print( content=f"Failed to parse output into response format after retries: {e.message}", color="yellow", ) @@ -704,7 +703,7 @@ class LiteAgent(FlowTrackable, BaseModel): ) self._guardrail_retry_count += 1 if self.verbose: - self._printer.print( + PRINTER.print( f"Guardrail failed. Retrying ({self._guardrail_retry_count}/{self.guardrail_max_retries})..." f"\n{guardrail_result.error}" ) @@ -875,7 +874,7 @@ class LiteAgent(FlowTrackable, BaseModel): if has_reached_max_iterations(self._iterations, self.max_iterations): formatted_answer = handle_max_iterations_exceeded( formatted_answer, - printer=self._printer, + printer=PRINTER, i18n=self.i18n, messages=self._messages, llm=cast(LLM, self.llm), @@ -890,7 +889,7 @@ class LiteAgent(FlowTrackable, BaseModel): llm=cast(LLM, self.llm), messages=self._messages, callbacks=self._callbacks, - printer=self._printer, + printer=PRINTER, from_agent=self, # type: ignore[arg-type] executor_context=self, response_model=response_model, @@ -933,7 +932,7 @@ class LiteAgent(FlowTrackable, BaseModel): self._append_message(formatted_answer.text, role="assistant") except OutputParserError as e: if self.verbose: - self._printer.print( + PRINTER.print( content="Failed to parse LLM output. 
Retrying...", color="yellow", ) @@ -942,7 +941,7 @@ class LiteAgent(FlowTrackable, BaseModel): messages=self._messages, iterations=self._iterations, log_error_after=3, - printer=self._printer, + printer=PRINTER, verbose=self.verbose, ) @@ -953,7 +952,7 @@ class LiteAgent(FlowTrackable, BaseModel): if is_context_length_exceeded(e): handle_context_length( respect_context_window=self.respect_context_window, - printer=self._printer, + printer=PRINTER, messages=self._messages, llm=cast(LLM, self.llm), callbacks=self._callbacks, @@ -961,7 +960,7 @@ class LiteAgent(FlowTrackable, BaseModel): verbose=self.verbose, ) continue - handle_unknown_error(self._printer, e, verbose=self.verbose) + handle_unknown_error(PRINTER, e, verbose=self.verbose) raise e finally: diff --git a/lib/crewai/src/crewai/llms/base_llm.py b/lib/crewai/src/crewai/llms/base_llm.py index fd3c8c45e..41ce1d2cd 100644 --- a/lib/crewai/src/crewai/llms/base_llm.py +++ b/lib/crewai/src/crewai/llms/base_llm.py @@ -857,7 +857,7 @@ class BaseLLM(BaseModel, ABC): LLMCallHookContext, get_before_llm_call_hooks, ) - from crewai.utilities.printer import Printer + from crewai.utilities.printer import PRINTER before_hooks = get_before_llm_call_hooks() if not before_hooks: @@ -872,21 +872,20 @@ class BaseLLM(BaseModel, ABC): crew=None, ) verbose = getattr(from_agent, "verbose", True) if from_agent else True - printer = Printer() try: for hook in before_hooks: result = hook(hook_context) if result is False: if verbose: - printer.print( + PRINTER.print( content="LLM call blocked by before_llm_call hook", color="yellow", ) return False except Exception as e: if verbose: - printer.print( + PRINTER.print( content=f"Error in before_llm_call hook: {e}", color="yellow", ) @@ -927,7 +926,7 @@ class BaseLLM(BaseModel, ABC): LLMCallHookContext, get_after_llm_call_hooks, ) - from crewai.utilities.printer import Printer + from crewai.utilities.printer import PRINTER after_hooks = get_after_llm_call_hooks() if not after_hooks: @@ 
-943,7 +942,6 @@ class BaseLLM(BaseModel, ABC): response=response, ) verbose = getattr(from_agent, "verbose", True) if from_agent else True - printer = Printer() modified_response = response try: @@ -954,7 +952,7 @@ class BaseLLM(BaseModel, ABC): hook_context.response = modified_response except Exception as e: if verbose: - printer.print( + PRINTER.print( content=f"Error in after_llm_call hook: {e}", color="yellow", ) diff --git a/lib/crewai/src/crewai/memory/storage/kickoff_task_outputs_storage.py b/lib/crewai/src/crewai/memory/storage/kickoff_task_outputs_storage.py index 6cc6b6c64..3f5f38c9f 100644 --- a/lib/crewai/src/crewai/memory/storage/kickoff_task_outputs_storage.py +++ b/lib/crewai/src/crewai/memory/storage/kickoff_task_outputs_storage.py @@ -6,7 +6,6 @@ import sqlite3 from typing import Any from crewai.task import Task -from crewai.utilities import Printer from crewai.utilities.crew_json_encoder import CrewJSONEncoder from crewai.utilities.errors import DatabaseError, DatabaseOperationError from crewai.utilities.lock_store import lock as store_lock @@ -27,7 +26,6 @@ class KickoffTaskOutputsSQLiteStorage: db_path = str(Path(db_storage_path()) / "latest_kickoff_task_outputs.db") self.db_path = db_path self._lock_name = f"sqlite:{os.path.realpath(self.db_path)}" - self._printer: Printer = Printer() self._initialize_db() def _initialize_db(self) -> None: diff --git a/lib/crewai/src/crewai/rag/embeddings/providers/ibm/embedding_callable.py b/lib/crewai/src/crewai/rag/embeddings/providers/ibm/embedding_callable.py index 7104c1705..44e97149a 100644 --- a/lib/crewai/src/crewai/rag/embeddings/providers/ibm/embedding_callable.py +++ b/lib/crewai/src/crewai/rag/embeddings/providers/ibm/embedding_callable.py @@ -6,10 +6,7 @@ from chromadb.api.types import Documents, EmbeddingFunction, Embeddings from typing_extensions import Unpack from crewai.rag.embeddings.providers.ibm.types import WatsonXProviderConfig -from crewai.utilities.printer import Printer - - -_printer 
= Printer() +from crewai.utilities.printer import PRINTER class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]): @@ -164,5 +161,5 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]): return cast(Embeddings, embeddings) except Exception as e: if self._verbose: - _printer.print(f"Error during WatsonX embedding: {e}", color="red") + PRINTER.print(f"Error during WatsonX embedding: {e}", color="red") raise diff --git a/lib/crewai/src/crewai/task.py b/lib/crewai/src/crewai/task.py index 73e49ade9..5671282dc 100644 --- a/lib/crewai/src/crewai/task.py +++ b/lib/crewai/src/crewai/task.py @@ -81,13 +81,10 @@ from crewai.utilities.guardrail_types import ( GuardrailsType, ) from crewai.utilities.i18n import I18N, get_i18n -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER from crewai.utilities.string_utils import interpolate_only -_printer = Printer() - - class Task(BaseModel): """Class that represents a task to be executed. @@ -981,7 +978,7 @@ Follow these guidelines: crew_chat_messages = json.loads(crew_chat_messages_json) except json.JSONDecodeError as e: if self.agent and self.agent.verbose: - _printer.print( + PRINTER.print( f"An error occurred while parsing crew chat messages: {e}", color="red", ) @@ -1227,8 +1224,7 @@ Follow these guidelines: task_output=task_output.raw, ) if agent and agent.verbose: - printer = Printer() - printer.print( + PRINTER.print( content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n", color="yellow", ) @@ -1325,8 +1321,7 @@ Follow these guidelines: task_output=task_output.raw, ) if agent and agent.verbose: - printer = Printer() - printer.print( + PRINTER.print( content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n", color="yellow", ) diff --git 
a/lib/crewai/src/crewai/tools/base_tool.py b/lib/crewai/src/crewai/tools/base_tool.py index 11f88a768..e1dc8f2ee 100644 --- a/lib/crewai/src/crewai/tools/base_tool.py +++ b/lib/crewai/src/crewai/tools/base_tool.py @@ -38,13 +38,10 @@ from crewai.tools.structured_tool import ( build_schema_hint, ) from crewai.types.callback import SerializableCallable, _resolve_dotted_path -from crewai.utilities.printer import Printer from crewai.utilities.pydantic_schema_utils import generate_model_description from crewai.utilities.string_utils import sanitize_tool_name -_printer = Printer() - P = ParamSpec("P") R = TypeVar("R", covariant=True) diff --git a/lib/crewai/src/crewai/tools/tool_usage.py b/lib/crewai/src/crewai/tools/tool_usage.py index 95adc0906..c99b32cf5 100644 --- a/lib/crewai/src/crewai/tools/tool_usage.py +++ b/lib/crewai/src/crewai/tools/tool_usage.py @@ -29,7 +29,7 @@ from crewai.utilities.agent_utils import ( ) from crewai.utilities.converter import Converter from crewai.utilities.i18n import I18N, get_i18n -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER from crewai.utilities.string_utils import sanitize_tool_name @@ -94,7 +94,6 @@ class ToolUsage: fingerprint_context: dict[str, str] | None = None, ) -> None: self._i18n: I18N = agent.i18n if agent else get_i18n() - self._printer: Printer = Printer() self._telemetry: Telemetry = Telemetry() self._run_attempts: int = 1 self._max_parsing_attempts: int = 3 @@ -129,7 +128,7 @@ class ToolUsage: if isinstance(calling, ToolUsageError): error = calling.message if self.agent and self.agent.verbose: - self._printer.print(content=f"\n\n{error}\n", color="red") + PRINTER.print(content=f"\n\n{error}\n", color="red") if self.task: self.task.increment_tools_errors() return error @@ -141,7 +140,7 @@ class ToolUsage: if self.task: self.task.increment_tools_errors() if self.agent and self.agent.verbose: - self._printer.print(content=f"\n\n{error}\n", color="red") + 
PRINTER.print(content=f"\n\n{error}\n", color="red") return error if ( @@ -157,7 +156,7 @@ class ToolUsage: if self.task: self.task.increment_tools_errors() if self.agent and self.agent.verbose: - self._printer.print(content=f"\n\n{error}\n", color="red") + PRINTER.print(content=f"\n\n{error}\n", color="red") return error return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}" @@ -177,7 +176,7 @@ class ToolUsage: if isinstance(calling, ToolUsageError): error = calling.message if self.agent and self.agent.verbose: - self._printer.print(content=f"\n\n{error}\n", color="red") + PRINTER.print(content=f"\n\n{error}\n", color="red") if self.task: self.task.increment_tools_errors() return error @@ -189,7 +188,7 @@ class ToolUsage: if self.task: self.task.increment_tools_errors() if self.agent and self.agent.verbose: - self._printer.print(content=f"\n\n{error}\n", color="red") + PRINTER.print(content=f"\n\n{error}\n", color="red") return error if ( @@ -206,7 +205,7 @@ class ToolUsage: if self.task: self.task.increment_tools_errors() if self.agent and self.agent.verbose: - self._printer.print(content=f"\n\n{error}\n", color="red") + PRINTER.print(content=f"\n\n{error}\n", color="red") return error return ( @@ -391,7 +390,7 @@ class ToolUsage: and self.agent and self.agent.verbose ): - self._printer.print( + PRINTER.print( content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}", color="blue", ) @@ -405,7 +404,7 @@ class ToolUsage: and self.agent and self.agent.verbose ): - self._printer.print( + PRINTER.print( content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}", color="blue", ) @@ -429,9 +428,7 @@ class ToolUsage: if self.task: self.task.increment_tools_errors() if self.agent and self.agent.verbose: - self._printer.print( - content=f"\n\n{error_message}\n", color="red" - ) + 
PRINTER.print(content=f"\n\n{error_message}\n", color="red") else: if self.task: self.task.increment_tools_errors() @@ -626,7 +623,7 @@ class ToolUsage: and self.agent and self.agent.verbose ): - self._printer.print( + PRINTER.print( content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}", color="blue", ) @@ -640,7 +637,7 @@ class ToolUsage: and self.agent and self.agent.verbose ): - self._printer.print( + PRINTER.print( content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}", color="blue", ) @@ -664,9 +661,7 @@ class ToolUsage: if self.task: self.task.increment_tools_errors() if self.agent and self.agent.verbose: - self._printer.print( - content=f"\n\n{error_message}\n", color="red" - ) + PRINTER.print(content=f"\n\n{error_message}\n", color="red") else: if self.task: self.task.increment_tools_errors() @@ -859,7 +854,7 @@ class ToolUsage: if self.task: self.task.increment_tools_errors() if self.agent and self.agent.verbose: - self._printer.print(content=f"\n\n{e}\n", color="red") + PRINTER.print(content=f"\n\n{e}\n", color="red") return ToolUsageError( f"{self._i18n.errors('tool_usage_error').format(error=e)}\nMoving on then. 
{self._i18n.slice('format').format(tool_names=self.tools_names)}" ) @@ -903,16 +898,14 @@ class ToolUsage: try: repaired_input = str(repair_json(tool_input, skip_json_loads=True)) if self.agent and self.agent.verbose: - self._printer.print( - content=f"Repaired JSON: {repaired_input}", color="blue" - ) + PRINTER.print(content=f"Repaired JSON: {repaired_input}", color="blue") arguments = json.loads(repaired_input) if isinstance(arguments, dict): return arguments except Exception as e: error = f"Failed to repair JSON: {e}" if self.agent and self.agent.verbose: - self._printer.print(content=error, color="red") + PRINTER.print(content=error, color="red") error_message = ( "Tool input must be a valid dictionary in JSON or Python literal format" diff --git a/lib/crewai/src/crewai/utilities/agent_utils.py b/lib/crewai/src/crewai/utilities/agent_utils.py index 09c570fac..d448cd162 100644 --- a/lib/crewai/src/crewai/utilities/agent_utils.py +++ b/lib/crewai/src/crewai/utilities/agent_utils.py @@ -32,7 +32,7 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import ( LLMContextLengthExceededError, ) from crewai.utilities.i18n import I18N -from crewai.utilities.printer import ColoredText, Printer +from crewai.utilities.printer import PRINTER, ColoredText, Printer from crewai.utilities.pydantic_schema_utils import generate_model_description from crewai.utilities.string_utils import sanitize_tool_name from crewai.utilities.token_counter_callback import TokenCalcHandler @@ -946,7 +946,7 @@ def summarize_messages( summarized_contents: list[SummaryContent] = [] for idx, chunk in enumerate(chunks, 1): if verbose: - Printer().print( + PRINTER.print( content=f"Summarizing {idx}/{total_chunks}...", color="yellow", ) @@ -967,7 +967,7 @@ def summarize_messages( else: # Multiple chunks — summarize in parallel via asyncio if verbose: - Printer().print( + PRINTER.print( content=f"Summarizing {total_chunks} chunks in parallel...", color="yellow", ) diff --git 
a/lib/crewai/src/crewai/utilities/converter.py b/lib/crewai/src/crewai/utilities/converter.py index 67f542d53..328ecbdf9 100644 --- a/lib/crewai/src/crewai/utilities/converter.py +++ b/lib/crewai/src/crewai/utilities/converter.py @@ -10,7 +10,7 @@ from typing_extensions import Unpack from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter from crewai.utilities.i18n import get_i18n from crewai.utilities.internal_instructor import InternalInstructor -from crewai.utilities.printer import Printer +from crewai.utilities.printer import PRINTER from crewai.utilities.pydantic_schema_utils import generate_model_description @@ -209,7 +209,7 @@ def convert_to_model( except Exception as e: if agent and getattr(agent, "verbose", True): - Printer().print( + PRINTER.print( content=f"Unexpected error during model conversion: {type(e).__name__}: {e}. Returning original result.", color="red", ) @@ -267,7 +267,7 @@ def handle_partial_json( raise except Exception as e: if agent and getattr(agent, "verbose", True): - Printer().print( + PRINTER.print( content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. 
Attempting alternative conversion method.", color="red", ) @@ -329,7 +329,7 @@ def convert_with_instructions( if isinstance(exported_result, ConverterError): if agent and getattr(agent, "verbose", True): - Printer().print( + PRINTER.print( content=f"Failed to convert result to model: {exported_result}", color="red", ) diff --git a/lib/crewai/src/crewai/utilities/logger.py b/lib/crewai/src/crewai/utilities/logger.py index 6796f26e0..afc09d693 100644 --- a/lib/crewai/src/crewai/utilities/logger.py +++ b/lib/crewai/src/crewai/utilities/logger.py @@ -1,8 +1,8 @@ from datetime import datetime -from pydantic import BaseModel, Field, PrivateAttr +from pydantic import BaseModel, Field -from crewai.utilities.printer import ColoredText, Printer, PrinterColor +from crewai.utilities.printer import PRINTER, ColoredText, PrinterColor class Logger(BaseModel): @@ -14,7 +14,6 @@ class Logger(BaseModel): default="bold_yellow", description="Default color for log messages", ) - _printer: Printer = PrivateAttr(default_factory=Printer) def log(self, level: str, message: str, color: PrinterColor | None = None) -> None: """Log a message with timestamp if verbose mode is enabled. 
@@ -26,7 +25,7 @@ class Logger(BaseModel): """ if self.verbose: timestamp: str = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - self._printer.print( + PRINTER.print( [ ColoredText(f"\n[{timestamp}]", "cyan"), ColoredText(f"[{level.upper()}]: ", "yellow"), diff --git a/lib/crewai/src/crewai/utilities/printer.py b/lib/crewai/src/crewai/utilities/printer.py index 949da543a..bb0dfecba 100644 --- a/lib/crewai/src/crewai/utilities/printer.py +++ b/lib/crewai/src/crewai/utilities/printer.py @@ -93,3 +93,6 @@ class Printer: file=file, flush=flush, ) + + +PRINTER: Printer = Printer() diff --git a/lib/crewai/tests/agents/test_agent_executor.py b/lib/crewai/tests/agents/test_agent_executor.py index 91fa12f27..7a6260a44 100644 --- a/lib/crewai/tests/agents/test_agent_executor.py +++ b/lib/crewai/tests/agents/test_agent_executor.py @@ -48,8 +48,6 @@ def _build_executor(**kwargs: Any) -> AgentExecutor: executor._last_context_error = None executor._step_executor = None executor._planner_observer = None - from crewai.utilities.printer import Printer - executor._printer = Printer() from crewai.utilities.i18n import get_i18n executor._i18n = kwargs.get("i18n") or get_i18n() return executor @@ -1491,7 +1489,6 @@ class TestReasoningEffort: executor.handle_step_observed_medium = ( AgentExecutor.handle_step_observed_medium.__get__(executor) ) - executor._printer = Mock() # --- Case 1: step succeeded → should return "continue_plan" --- success_todo = TodoItem( @@ -1562,7 +1559,6 @@ class TestReasoningEffort: executor.handle_step_observed_low = ( AgentExecutor.handle_step_observed_low.__get__(executor) ) - executor._printer = Mock() todo = TodoItem( step_number=1, diff --git a/lib/crewai/tests/agents/test_lite_agent.py b/lib/crewai/tests/agents/test_lite_agent.py index 5397e6281..b42e2c1ec 100644 --- a/lib/crewai/tests/agents/test_lite_agent.py +++ b/lib/crewai/tests/agents/test_lite_agent.py @@ -1060,27 +1060,13 @@ def test_lite_agent_verbose_false_suppresses_printer_output(): 
verbose=False, ) - result = agent.kickoff("Say hello") + mock_printer = Mock() + with patch("crewai.lite_agent.PRINTER", mock_printer): + result = agent.kickoff("Say hello") assert result is not None assert isinstance(result, LiteAgentOutput) - # Verify the printer was never called - agent._printer.print = Mock() - # For a clean verification, patch printer before execution - with pytest.warns(DeprecationWarning): - agent2 = LiteAgent( - role="Test Agent", - goal="Test goal", - backstory="Test backstory", - llm=mock_llm, - verbose=False, - ) - - mock_printer = Mock() - agent2._printer = mock_printer - - agent2.kickoff("Say hello") - + # Verify the printer was never called when verbose=False mock_printer.print.assert_not_called() diff --git a/lib/crewai/tests/tools/test_tool_usage.py b/lib/crewai/tests/tools/test_tool_usage.py index b68a41666..ba2e797d9 100644 --- a/lib/crewai/tests/tools/test_tool_usage.py +++ b/lib/crewai/tests/tools/test_tool_usage.py @@ -529,9 +529,6 @@ def test_tool_validate_input_error_event(): mock_task = MagicMock() mock_tools_handler = MagicMock() - # Mock printer - mock_printer = MagicMock() - # Create test tool class TestTool(BaseTool): name: str = "Test Tool" @@ -551,8 +548,6 @@ def test_tool_validate_input_error_event(): agent=mock_agent, action=MagicMock(tool="test_tool"), ) - tool_usage._printer = mock_printer - # Mock all parsing attempts to fail with ( patch("json.loads", side_effect=json.JSONDecodeError("Test Error", "", 0)), diff --git a/lib/crewai/tests/utilities/test_converter.py b/lib/crewai/tests/utilities/test_converter.py index 017f7f8ae..2df350c0d 100644 --- a/lib/crewai/tests/utilities/test_converter.py +++ b/lib/crewai/tests/utilities/test_converter.py @@ -207,10 +207,10 @@ def test_convert_with_instructions_failure( mock_create_converter.return_value = mock_converter result = "Some text to convert" - with patch("crewai.utilities.converter.Printer") as mock_printer: + with patch("crewai.utilities.converter.PRINTER") as 
mock_printer: output = convert_with_instructions(result, SimpleModel, False, mock_agent) assert output == result - mock_printer.return_value.print.assert_called_once() + mock_printer.print.assert_called_once() # Tests for get_conversion_instructions