feat: add from_checkpoint parameter to Agent.kickoff, kickoff_async, akickoff

This commit is contained in:
Greyson LaLonde
2026-04-17 03:40:37 +08:00
committed by GitHub
parent 6136228a66
commit 54391fdbdf
2 changed files with 55 additions and 1 deletions

View File

@@ -84,6 +84,7 @@ from crewai.rag.embeddings.types import EmbedderConfig
from crewai.security.fingerprint import Fingerprint
from crewai.skills.loader import activate_skill, discover_skills
from crewai.skills.models import INSTRUCTIONS, Skill as SkillModel
from crewai.state.checkpoint_config import CheckpointConfig, apply_checkpoint
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.types.callback import SerializableCallable
from crewai.utilities.agent_utils import (
@@ -1457,6 +1458,7 @@ class Agent(BaseAgent):
messages: str | list[LLMMessage],
response_format: type[Any] | None = None,
input_files: dict[str, FileInput] | None = None,
from_checkpoint: CheckpointConfig | None = None,
) -> LiteAgentOutput | Coroutine[Any, Any, LiteAgentOutput]:
"""Execute the agent with the given messages using the AgentExecutor.
@@ -1475,6 +1477,9 @@ class Agent(BaseAgent):
response_format: Optional Pydantic model for structured output.
input_files: Optional dict of named files to attach to the message.
Files can be paths, bytes, or File objects from crewai_files.
from_checkpoint: Optional checkpoint config. If ``restore_from``
is set, the agent resumes from that checkpoint. Remaining
config fields enable checkpointing for the run.
Returns:
LiteAgentOutput: The result of the agent execution.
@@ -1483,6 +1488,14 @@ class Agent(BaseAgent):
Note:
For explicit async usage outside of Flow, use kickoff_async() directly.
"""
restored = apply_checkpoint(self, from_checkpoint)
if restored is not None:
return restored.kickoff( # type: ignore[no-any-return]
messages=messages,
response_format=response_format,
input_files=input_files,
)
if is_inside_event_loop():
return self.kickoff_async(messages, response_format, input_files)
@@ -1760,6 +1773,7 @@ class Agent(BaseAgent):
messages: str | list[LLMMessage],
response_format: type[Any] | None = None,
input_files: dict[str, FileInput] | None = None,
from_checkpoint: CheckpointConfig | None = None,
) -> LiteAgentOutput:
"""Execute the agent asynchronously with the given messages.
@@ -1775,10 +1789,20 @@ class Agent(BaseAgent):
response_format: Optional Pydantic model for structured output.
input_files: Optional dict of named files to attach to the message.
Files can be paths, bytes, or File objects from crewai_files.
from_checkpoint: Optional checkpoint config. If ``restore_from``
is set, the agent resumes from that checkpoint.
Returns:
LiteAgentOutput: The result of the agent execution.
"""
restored = apply_checkpoint(self, from_checkpoint)
if restored is not None:
return await restored.kickoff_async( # type: ignore[no-any-return]
messages=messages,
response_format=response_format,
input_files=input_files,
)
executor, inputs, agent_info, parsed_tools = self._prepare_kickoff(
messages, response_format, input_files
)
@@ -1808,6 +1832,7 @@ class Agent(BaseAgent):
messages: str | list[LLMMessage],
response_format: type[Any] | None = None,
input_files: dict[str, FileInput] | None = None,
from_checkpoint: CheckpointConfig | None = None,
) -> LiteAgentOutput:
"""Async version of kickoff. Alias for kickoff_async.
@@ -1815,8 +1840,12 @@ class Agent(BaseAgent):
messages: Either a string query or a list of message dictionaries.
response_format: Optional Pydantic model for structured output.
input_files: Optional dict of named files to attach to the message.
from_checkpoint: Optional checkpoint config. If ``restore_from``
is set, the agent resumes from that checkpoint.
Returns:
LiteAgentOutput: The result of the agent execution.
"""
return await self.kickoff_async(messages, response_format, input_files)
return await self.kickoff_async(
messages, response_format, input_files, from_checkpoint
)

View File

@@ -523,6 +523,31 @@ class TestKickoffFromCheckpoint:
assert isinstance(crew.checkpoint, CheckpointConfig)
assert crew.checkpoint.on_events == ["task_completed"]
def test_agent_kickoff_delegates_to_from_checkpoint(self) -> None:
mock_restored = MagicMock(spec=Agent)
mock_restored.kickoff.return_value = "agent_result"
cfg = CheckpointConfig(restore_from="/path/to/agent_cp.json")
with patch.object(Agent, "from_checkpoint", return_value=mock_restored):
agent = Agent(role="r", goal="g", backstory="b", llm="gpt-4o-mini")
result = agent.kickoff(messages="hello", from_checkpoint=cfg)
mock_restored.kickoff.assert_called_once_with(
messages="hello", response_format=None, input_files=None
)
assert mock_restored.checkpoint.restore_from is None
assert result == "agent_result"
def test_agent_kickoff_config_only_sets_checkpoint(self) -> None:
cfg = CheckpointConfig(on_events=["lite_agent_execution_completed"])
agent = Agent(role="r", goal="g", backstory="b", llm="gpt-4o-mini")
assert agent.checkpoint is None
with patch.object(Agent, "_prepare_kickoff", side_effect=RuntimeError("stop")):
with pytest.raises(RuntimeError, match="stop"):
agent.kickoff(messages="hello", from_checkpoint=cfg)
assert isinstance(agent.checkpoint, CheckpointConfig)
assert agent.checkpoint.on_events == ["lite_agent_execution_completed"]
def test_flow_kickoff_delegates_to_from_checkpoint(self) -> None:
mock_restored = MagicMock(spec=Flow)
mock_restored.kickoff.return_value = "flow_result"