diff --git a/docs/ar/changelog.mdx b/docs/ar/changelog.mdx
index ff996fff3..8fbb2d750 100644
--- a/docs/ar/changelog.mdx
+++ b/docs/ar/changelog.mdx
@@ -4,6 +4,28 @@ description: "تحديثات المنتج والتحسينات وإصلاحات
icon: "clock"
mode: "wide"
---
+
+ ## v1.13.0a7
+
+ [عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0a7)
+
+ ## ما الذي تغير
+
+ ### الميزات
+ - إضافة امتداد A2UI مع دعم v0.8/v0.9، والمخططات، والوثائق
+
+ ### إصلاحات الأخطاء
+ - إصلاح بادئات الرؤية متعددة الوسائط عن طريق إضافة GPT-5 وسلسلة o
+
+ ### الوثائق
+ - تحديث سجل التغييرات والإصدار لـ v1.13.0a6
+
+ ## المساهمون
+
+ @alex-clawd, @greysonlalonde, @joaomdmoura
+
+
+
## v1.13.0a6
diff --git a/docs/en/changelog.mdx b/docs/en/changelog.mdx
index b62dceebb..037db203e 100644
--- a/docs/en/changelog.mdx
+++ b/docs/en/changelog.mdx
@@ -4,6 +4,28 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
icon: "clock"
mode: "wide"
---
+
+ ## v1.13.0a7
+
+ [View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0a7)
+
+ ## What's Changed
+
+ ### Features
+ - Add A2UI extension with v0.8/v0.9 support, schemas, and docs
+
+ ### Bug Fixes
+ - Fix multimodal vision prefixes by adding GPT-5 and o-series
+
+ ### Documentation
+ - Update changelog and version for v1.13.0a6
+
+ ## Contributors
+
+ @alex-clawd, @greysonlalonde, @joaomdmoura
+
+
+
## v1.13.0a6
diff --git a/docs/ko/changelog.mdx b/docs/ko/changelog.mdx
index 957d51723..f4f30ae07 100644
--- a/docs/ko/changelog.mdx
+++ b/docs/ko/changelog.mdx
@@ -4,6 +4,28 @@ description: "CrewAI의 제품 업데이트, 개선 사항 및 버그 수정"
icon: "clock"
mode: "wide"
---
+
+ ## v1.13.0a7
+
+ [GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0a7)
+
+ ## 변경 사항
+
+ ### 기능
+ - v0.8/v0.9 지원, 스키마 및 문서가 포함된 A2UI 확장 추가
+
+ ### 버그 수정
+ - GPT-5 및 o-series를 추가하여 멀티모달 비전 접두사 수정
+
+ ### 문서
+ - v1.13.0a6에 대한 변경 로그 및 버전 업데이트
+
+ ## 기여자
+
+ @alex-clawd, @greysonlalonde, @joaomdmoura
+
+
+
## v1.13.0a6
diff --git a/docs/pt-BR/changelog.mdx b/docs/pt-BR/changelog.mdx
index 2126ae851..3173bcf1b 100644
--- a/docs/pt-BR/changelog.mdx
+++ b/docs/pt-BR/changelog.mdx
@@ -4,6 +4,28 @@ description: "Atualizações de produto, melhorias e correções do CrewAI"
icon: "clock"
mode: "wide"
---
+
+ ## v1.13.0a7
+
+ [Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0a7)
+
+ ## O que Mudou
+
+ ### Funcionalidades
+ - Adicionar a extensão A2UI com suporte a v0.8/v0.9, esquemas e documentação
+
+ ### Correções de Bugs
+ - Corrigir prefixos de visão multimodal adicionando GPT-5 e o-series
+
+ ### Documentação
+ - Atualizar changelog e versão para v1.13.0a6
+
+ ## Contribuidores
+
+ @alex-clawd, @greysonlalonde, @joaomdmoura
+
+
+
## v1.13.0a6
diff --git a/lib/crewai-files/src/crewai_files/__init__.py b/lib/crewai-files/src/crewai_files/__init__.py
index 1b79e738c..26da7d77f 100644
--- a/lib/crewai-files/src/crewai_files/__init__.py
+++ b/lib/crewai-files/src/crewai_files/__init__.py
@@ -152,4 +152,4 @@ __all__ = [
"wrap_file_source",
]
-__version__ = "1.13.0a6"
+__version__ = "1.13.0a7"
diff --git a/lib/crewai-tools/pyproject.toml b/lib/crewai-tools/pyproject.toml
index ef65d3a54..30da18c45 100644
--- a/lib/crewai-tools/pyproject.toml
+++ b/lib/crewai-tools/pyproject.toml
@@ -11,7 +11,7 @@ dependencies = [
"pytube~=15.0.0",
"requests~=2.32.5",
"docker~=7.1.0",
- "crewai==1.13.0a6",
+ "crewai==1.13.0a7",
"tiktoken~=0.8.0",
"beautifulsoup4~=4.13.4",
"python-docx~=1.2.0",
diff --git a/lib/crewai-tools/src/crewai_tools/__init__.py b/lib/crewai-tools/src/crewai_tools/__init__.py
index 1c0d7271a..292596708 100644
--- a/lib/crewai-tools/src/crewai_tools/__init__.py
+++ b/lib/crewai-tools/src/crewai_tools/__init__.py
@@ -309,4 +309,4 @@ __all__ = [
"ZapierActionTools",
]
-__version__ = "1.13.0a6"
+__version__ = "1.13.0a7"
diff --git a/lib/crewai/pyproject.toml b/lib/crewai/pyproject.toml
index 705cdcb6f..0133eaffa 100644
--- a/lib/crewai/pyproject.toml
+++ b/lib/crewai/pyproject.toml
@@ -54,7 +54,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = [
- "crewai-tools==1.13.0a6",
+ "crewai-tools==1.13.0a7",
]
embeddings = [
"tiktoken~=0.8.0"
diff --git a/lib/crewai/src/crewai/__init__.py b/lib/crewai/src/crewai/__init__.py
index 2ebfbf99b..64d459358 100644
--- a/lib/crewai/src/crewai/__init__.py
+++ b/lib/crewai/src/crewai/__init__.py
@@ -10,6 +10,7 @@ from crewai.agent.core import Agent
from crewai.agent.planning_config import PlanningConfig
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
+from crewai.execution_context import ExecutionContext
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
@@ -44,7 +45,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
-__version__ = "1.13.0a6"
+__version__ = "1.13.0a7"
_telemetry_submitted = False
@@ -96,6 +97,10 @@ def __getattr__(name: str) -> Any:
try:
+ from crewai.agents.agent_builder.base_agent import BaseAgent as _BaseAgent
+ from crewai.agents.agent_builder.base_agent_executor_mixin import (
+ CrewAgentExecutorMixin as _CrewAgentExecutorMixin,
+ )
from crewai.agents.tools_handler import ToolsHandler as _ToolsHandler
from crewai.experimental.agent_executor import AgentExecutor as _AgentExecutor
from crewai.hooks.llm_hooks import LLMCallHookContext as _LLMCallHookContext
@@ -105,25 +110,66 @@ try:
SystemPromptResult as _SystemPromptResult,
)
- _AgentExecutor.model_rebuild(
- force=True,
- _types_namespace={
- "Agent": Agent,
- "ToolsHandler": _ToolsHandler,
- "Crew": Crew,
- "BaseLLM": BaseLLM,
- "Task": Task,
- "StandardPromptResult": _StandardPromptResult,
- "SystemPromptResult": _SystemPromptResult,
- "LLMCallHookContext": _LLMCallHookContext,
- "ToolResult": _ToolResult,
- },
- )
+ _base_namespace: dict[str, type] = {
+ "Agent": Agent,
+ "Crew": Crew,
+ "BaseLLM": BaseLLM,
+ "Task": Task,
+ "CrewAgentExecutorMixin": _CrewAgentExecutorMixin,
+ }
+
+ try:
+ from crewai.a2a.config import (
+ A2AClientConfig as _A2AClientConfig,
+ A2AConfig as _A2AConfig,
+ A2AServerConfig as _A2AServerConfig,
+ )
+
+ _base_namespace.update(
+ {
+ "A2AConfig": _A2AConfig,
+ "A2AClientConfig": _A2AClientConfig,
+ "A2AServerConfig": _A2AServerConfig,
+ }
+ )
+ except ImportError:
+ pass
+
+ import sys
+
+ _full_namespace = {
+ **_base_namespace,
+ "ToolsHandler": _ToolsHandler,
+ "StandardPromptResult": _StandardPromptResult,
+ "SystemPromptResult": _SystemPromptResult,
+ "LLMCallHookContext": _LLMCallHookContext,
+ "ToolResult": _ToolResult,
+ }
+
+ _resolve_namespace = {
+ **_full_namespace,
+ **sys.modules[_BaseAgent.__module__].__dict__,
+ }
+
+ for _mod_name in (
+ _BaseAgent.__module__,
+ Agent.__module__,
+ _AgentExecutor.__module__,
+ ):
+ sys.modules[_mod_name].__dict__.update(_resolve_namespace)
+
+ _BaseAgent.model_rebuild(force=True, _types_namespace=_full_namespace)
+ _AgentExecutor.model_rebuild(force=True, _types_namespace=_full_namespace)
+
+ try:
+ Agent.model_rebuild(force=True, _types_namespace=_full_namespace)
+ except PydanticUserError:
+ pass
except (ImportError, PydanticUserError):
import logging as _logging
_logging.getLogger(__name__).warning(
- "AgentExecutor.model_rebuild() failed; forward refs may be unresolved.",
+ "model_rebuild() failed; forward refs may be unresolved.",
exc_info=True,
)
@@ -133,6 +179,7 @@ __all__ = [
"BaseLLM",
"Crew",
"CrewOutput",
+ "ExecutionContext",
"Flow",
"Knowledge",
"LLMGuardrail",
diff --git a/lib/crewai/src/crewai/agent/core.py b/lib/crewai/src/crewai/agent/core.py
index e125dd7d4..760268a1d 100644
--- a/lib/crewai/src/crewai/agent/core.py
+++ b/lib/crewai/src/crewai/agent/core.py
@@ -25,6 +25,7 @@ from pydantic import (
BaseModel,
ConfigDict,
Field,
+ InstanceOf,
PrivateAttr,
model_validator,
)
@@ -267,6 +268,9 @@ class Agent(BaseAgent):
Can be a single A2AConfig/A2AClientConfig/A2AServerConfig, or a list of any number of A2AConfig/A2AClientConfig with a single A2AServerConfig.
""",
)
+ agent_executor: InstanceOf[CrewAgentExecutor] | InstanceOf[AgentExecutor] | None = (
+ Field(default=None, description="An instance of the CrewAgentExecutor class.")
+ )
executor_class: type[CrewAgentExecutor] | type[AgentExecutor] = Field(
default=CrewAgentExecutor,
description="Class to use for the agent executor. Defaults to CrewAgentExecutor, can optionally use AgentExecutor.",
@@ -690,7 +694,9 @@ class Agent(BaseAgent):
task_prompt,
knowledge_config,
self.knowledge.query if self.knowledge else lambda *a, **k: None,
- self.crew.query_knowledge if self.crew else lambda *a, **k: None,
+ self.crew.query_knowledge
+ if self.crew and not isinstance(self.crew, str)
+ else lambda *a, **k: None,
)
task_prompt = self._finalize_task_prompt(task_prompt, tools, task)
@@ -777,14 +783,18 @@ class Agent(BaseAgent):
if not self.agent_executor:
raise RuntimeError("Agent executor is not initialized.")
- return self.agent_executor.invoke(
- {
- "input": task_prompt,
- "tool_names": self.agent_executor.tools_names,
- "tools": self.agent_executor.tools_description,
- "ask_for_human_input": task.human_input,
- }
- )["output"]
+ result = cast(
+ dict[str, Any],
+ self.agent_executor.invoke(
+ {
+ "input": task_prompt,
+ "tool_names": self.agent_executor.tools_names,
+ "tools": self.agent_executor.tools_description,
+ "ask_for_human_input": task.human_input,
+ }
+ ),
+ )
+ return result["output"]
async def aexecute_task(
self,
@@ -955,19 +965,23 @@ class Agent(BaseAgent):
if self.agent_executor is not None:
self._update_executor_parameters(
task=task,
- tools=parsed_tools, # type: ignore[arg-type]
+ tools=parsed_tools,
raw_tools=raw_tools,
prompt=prompt,
stop_words=stop_words,
rpm_limit_fn=rpm_limit_fn,
)
else:
+ if not isinstance(self.llm, BaseLLM):
+ raise RuntimeError(
+ "LLM must be resolved before creating agent executor."
+ )
self.agent_executor = self.executor_class(
- llm=cast(BaseLLM, self.llm),
+ llm=self.llm,
task=task, # type: ignore[arg-type]
i18n=self.i18n,
agent=self,
- crew=self.crew,
+ crew=self.crew, # type: ignore[arg-type]
tools=parsed_tools,
prompt=prompt,
original_tools=raw_tools,
@@ -991,7 +1005,7 @@ class Agent(BaseAgent):
def _update_executor_parameters(
self,
task: Task | None,
- tools: list[BaseTool],
+ tools: list[CrewStructuredTool],
raw_tools: list[BaseTool],
prompt: SystemPromptResult | StandardPromptResult,
stop_words: list[str],
@@ -1007,11 +1021,17 @@ class Agent(BaseAgent):
stop_words: Stop words list.
rpm_limit_fn: RPM limit callback function.
"""
+ if self.agent_executor is None:
+ raise RuntimeError("Agent executor is not initialized.")
+
self.agent_executor.task = task
self.agent_executor.tools = tools
self.agent_executor.original_tools = raw_tools
self.agent_executor.prompt = prompt
- self.agent_executor.stop_words = stop_words
+ if isinstance(self.agent_executor, AgentExecutor):
+ self.agent_executor.stop_words = stop_words
+ else:
+ self.agent_executor.stop = stop_words
self.agent_executor.tools_names = get_tool_names(tools)
self.agent_executor.tools_description = render_text_description_and_args(tools)
self.agent_executor.response_model = (
@@ -1787,21 +1807,3 @@ class Agent(BaseAgent):
LiteAgentOutput: The result of the agent execution.
"""
return await self.kickoff_async(messages, response_format, input_files)
-
-
-try:
- from crewai.a2a.config import (
- A2AClientConfig as _A2AClientConfig,
- A2AConfig as _A2AConfig,
- A2AServerConfig as _A2AServerConfig,
- )
-
- Agent.model_rebuild(
- _types_namespace={
- "A2AConfig": _A2AConfig,
- "A2AClientConfig": _A2AClientConfig,
- "A2AServerConfig": _A2AServerConfig,
- }
- )
-except ImportError:
- pass
diff --git a/lib/crewai/src/crewai/agent/utils.py b/lib/crewai/src/crewai/agent/utils.py
index 88accddf3..8690c8faf 100644
--- a/lib/crewai/src/crewai/agent/utils.py
+++ b/lib/crewai/src/crewai/agent/utils.py
@@ -137,7 +137,8 @@ def handle_knowledge_retrieval(
Returns:
The task prompt potentially augmented with knowledge context.
"""
- if not (agent.knowledge or (agent.crew and agent.crew.knowledge)):
+ _crew = agent.crew if not isinstance(agent.crew, str) else None
+ if not (agent.knowledge or (_crew and _crew.knowledge)):
return task_prompt
crewai_event_bus.emit(
@@ -244,7 +245,7 @@ def apply_training_data(agent: Agent, task_prompt: str) -> str:
Returns:
The task prompt with training data applied.
"""
- if agent.crew and agent.crew._train:
+ if agent.crew and not isinstance(agent.crew, str) and agent.crew._train:
return agent._training_handler(task_prompt=task_prompt)
return agent._use_trained_data(task_prompt=task_prompt)
@@ -355,7 +356,8 @@ async def ahandle_knowledge_retrieval(
Returns:
The task prompt potentially augmented with knowledge context.
"""
- if not (agent.knowledge or (agent.crew and agent.crew.knowledge)):
+ _crew = agent.crew if not isinstance(agent.crew, str) else None
+ if not (agent.knowledge or (_crew and _crew.knowledge)):
return task_prompt
crewai_event_bus.emit(
@@ -381,15 +383,16 @@ async def ahandle_knowledge_retrieval(
if agent.agent_knowledge_context:
task_prompt += agent.agent_knowledge_context
- knowledge_snippets = await agent.crew.aquery_knowledge(
- [agent.knowledge_search_query], **knowledge_config
- )
- if knowledge_snippets:
- agent.crew_knowledge_context = extract_knowledge_context(
- knowledge_snippets
+ if _crew:
+ knowledge_snippets = await _crew.aquery_knowledge(
+ [agent.knowledge_search_query], **knowledge_config
)
- if agent.crew_knowledge_context:
- task_prompt += agent.crew_knowledge_context
+ if knowledge_snippets:
+ agent.crew_knowledge_context = extract_knowledge_context(
+ knowledge_snippets
+ )
+ if agent.crew_knowledge_context:
+ task_prompt += agent.crew_knowledge_context
crewai_event_bus.emit(
agent,
diff --git a/lib/crewai/src/crewai/agents/agent_adapters/openai_agents/openai_adapter.py b/lib/crewai/src/crewai/agents/agent_adapters/openai_agents/openai_adapter.py
index 58687276a..568f5e83e 100644
--- a/lib/crewai/src/crewai/agents/agent_adapters/openai_agents/openai_adapter.py
+++ b/lib/crewai/src/crewai/agents/agent_adapters/openai_agents/openai_adapter.py
@@ -188,14 +188,14 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
self._openai_agent = OpenAIAgent(
name=self.role,
instructions=instructions,
- model=self.llm,
+ model=str(self.llm),
**self._agent_config or {},
)
if all_tools:
self.configure_tools(all_tools)
- self.agent_executor = Runner
+ self.agent_executor = Runner # type: ignore[assignment]
def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
"""Configure tools for the OpenAI Assistant.
diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py
index ce5682266..f487a0d8c 100644
--- a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py
+++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py
@@ -5,21 +5,25 @@ from copy import copy as shallow_copy
from hashlib import md5
from pathlib import Path
import re
-from typing import Any, Final, Literal
+from typing import TYPE_CHECKING, Annotated, Any, Final, Literal
import uuid
from pydantic import (
UUID4,
BaseModel,
+ BeforeValidator,
Field,
+ InstanceOf,
PrivateAttr,
field_validator,
model_validator,
)
+from pydantic.functional_serializers import PlainSerializer
from pydantic_core import PydanticCustomError
from typing_extensions import Self
from crewai.agent.internal.meta import AgentMeta
+from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
@@ -27,6 +31,7 @@ from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.knowledge_config import KnowledgeConfig
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
+from crewai.llms.base_llm import BaseLLM
from crewai.mcp.config import MCPServerConfig
from crewai.memory.memory_scope import MemoryScope, MemorySlice
from crewai.memory.unified_memory import Memory
@@ -42,6 +47,20 @@ from crewai.utilities.rpm_controller import RPMController
from crewai.utilities.string_utils import interpolate_only
+if TYPE_CHECKING:
+ from crewai.crew import Crew
+
+
+def _validate_crew_ref(value: Any) -> Any:
+ return value
+
+
+def _serialize_crew_ref(value: Any) -> str | None:
+ if value is None:
+ return None
+ return str(value.id) if hasattr(value, "id") else str(value)
+
+
_SLUG_RE: Final[re.Pattern[str]] = re.compile(
r"^(?:crewai-amp:)?[a-zA-Z0-9][a-zA-Z0-9_-]*(?:#[\w-]+)?$"
)
@@ -122,7 +141,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
__hash__ = object.__hash__
_logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
_rpm_controller: RPMController | None = PrivateAttr(default=None)
- _request_within_rpm_limit: Any = PrivateAttr(default=None)
+ _request_within_rpm_limit: SerializableCallable | None = PrivateAttr(default=None)
_original_role: str | None = PrivateAttr(default=None)
_original_goal: str | None = PrivateAttr(default=None)
_original_backstory: str | None = PrivateAttr(default=None)
@@ -154,13 +173,19 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
max_iter: int = Field(
default=25, description="Maximum iterations for an agent to execute a task"
)
- agent_executor: Any = Field(
+ agent_executor: InstanceOf[CrewAgentExecutorMixin] | None = Field(
default=None, description="An instance of the CrewAgentExecutor class."
)
- llm: Any = Field(
+ llm: str | BaseLLM | None = Field(
default=None, description="Language model that will run the agent."
)
- crew: Any = Field(default=None, description="Crew to which the agent belongs.")
+ crew: Annotated[
+ Crew | str | None,
+ BeforeValidator(_validate_crew_ref),
+ PlainSerializer(
+ _serialize_crew_ref, return_type=str | None, when_used="always"
+ ),
+ ] = Field(default=None, description="Crew to which the agent belongs.")
i18n: I18N = Field(
default_factory=get_i18n, description="Internationalization settings."
)
diff --git a/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml b/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml
index 8920f2052..a14ab19cd 100644
--- a/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml
+++ b/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
- "crewai[tools]==1.13.0a6"
+ "crewai[tools]==1.13.0a7"
]
[project.scripts]
diff --git a/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml b/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml
index 08346e304..914232bb0 100644
--- a/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml
+++ b/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
- "crewai[tools]==1.13.0a6"
+ "crewai[tools]==1.13.0a7"
]
[project.scripts]
diff --git a/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml b/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml
index 178537741..21457ceda 100644
--- a/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml
+++ b/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
- "crewai[tools]==1.13.0a6"
+ "crewai[tools]==1.13.0a7"
]
[tool.crewai]
diff --git a/lib/crewai/src/crewai/crew.py b/lib/crewai/src/crewai/crew.py
index 00107b063..3b18a2753 100644
--- a/lib/crewai/src/crewai/crew.py
+++ b/lib/crewai/src/crewai/crew.py
@@ -266,7 +266,7 @@ class Crew(FlowTrackable, BaseModel):
default=False,
description="Plan the crew execution and add the plan to the crew.",
)
- planning_llm: str | BaseLLM | Any | None = Field(
+ planning_llm: str | BaseLLM | None = Field(
default=None,
description=(
"Language model that will run the AgentPlanner if planning is True."
@@ -287,7 +287,7 @@ class Crew(FlowTrackable, BaseModel):
"knowledge object."
),
)
- chat_llm: str | BaseLLM | Any | None = Field(
+ chat_llm: str | BaseLLM | None = Field(
default=None,
description="LLM used to handle chatting with the crew.",
)
@@ -1311,7 +1311,7 @@ class Crew(FlowTrackable, BaseModel):
and hasattr(agent, "multimodal")
and getattr(agent, "multimodal", False)
):
- if not (agent.llm and agent.llm.supports_multimodal()):
+ if not (isinstance(agent.llm, BaseLLM) and agent.llm.supports_multimodal()):
tools = self._add_multimodal_tools(agent, tools)
if agent and (hasattr(agent, "apps") and getattr(agent, "apps", None)):
@@ -1328,7 +1328,11 @@ class Crew(FlowTrackable, BaseModel):
files = get_all_files(self.id, task.id)
if files:
supported_types: list[str] = []
- if agent and agent.llm and agent.llm.supports_multimodal():
+ if (
+ agent
+ and isinstance(agent.llm, BaseLLM)
+ and agent.llm.supports_multimodal()
+ ):
provider = (
getattr(agent.llm, "provider", None)
or getattr(agent.llm, "model", None)
@@ -1781,17 +1785,10 @@ class Crew(FlowTrackable, BaseModel):
token_sum = self.manager_agent._token_process.get_summary()
total_usage_metrics.add_usage_metrics(token_sum)
- if (
- self.manager_agent
- and hasattr(self.manager_agent, "llm")
- and hasattr(self.manager_agent.llm, "get_token_usage_summary")
- ):
+ if self.manager_agent:
if isinstance(self.manager_agent.llm, BaseLLM):
llm_usage = self.manager_agent.llm.get_token_usage_summary()
- else:
- llm_usage = self.manager_agent.llm._token_process.get_summary()
-
- total_usage_metrics.add_usage_metrics(llm_usage)
+ total_usage_metrics.add_usage_metrics(llm_usage)
self.usage_metrics = total_usage_metrics
return total_usage_metrics
diff --git a/lib/crewai/src/crewai/crews/utils.py b/lib/crewai/src/crewai/crews/utils.py
index 0b50e60bb..2b62240d2 100644
--- a/lib/crewai/src/crewai/crews/utils.py
+++ b/lib/crewai/src/crewai/crews/utils.py
@@ -11,6 +11,7 @@ from opentelemetry import baggage
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.crews.crew_output import CrewOutput
+from crewai.llms.base_llm import BaseLLM
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.skills.loader import activate_skill, discover_skills
from crewai.skills.models import INSTRUCTIONS, Skill as SkillModel
@@ -50,7 +51,7 @@ def enable_agent_streaming(agents: Iterable[BaseAgent]) -> None:
agents: Iterable of agents to enable streaming on.
"""
for agent in agents:
- if agent.llm is not None:
+ if isinstance(agent.llm, BaseLLM):
agent.llm.stream = True
diff --git a/lib/crewai/src/crewai/events/base_events.py b/lib/crewai/src/crewai/events/base_events.py
index 6eeaa06e8..bceeccbf6 100644
--- a/lib/crewai/src/crewai/events/base_events.py
+++ b/lib/crewai/src/crewai/events/base_events.py
@@ -25,13 +25,25 @@ def _get_or_create_counter() -> Iterator[int]:
return counter
+_last_emitted: contextvars.ContextVar[int] = contextvars.ContextVar(
+ "_last_emitted", default=0
+)
+
+
def get_next_emission_sequence() -> int:
"""Get the next emission sequence number.
Returns:
The next sequence number.
"""
- return next(_get_or_create_counter())
+ seq = next(_get_or_create_counter())
+ _last_emitted.set(seq)
+ return seq
+
+
+def get_emission_sequence() -> int:
+ """Get the current emission sequence value without incrementing."""
+ return _last_emitted.get()
def reset_emission_counter() -> None:
@@ -41,6 +53,14 @@ def reset_emission_counter() -> None:
"""
counter: Iterator[int] = itertools.count(start=1)
_emission_counter.set(counter)
+ _last_emitted.set(0)
+
+
+def set_emission_counter(start: int) -> None:
+ """Set the emission counter to resume from a given value."""
+ counter: Iterator[int] = itertools.count(start=start + 1)
+ _emission_counter.set(counter)
+ _last_emitted.set(start)
class BaseEvent(BaseModel):
diff --git a/lib/crewai/src/crewai/execution_context.py b/lib/crewai/src/crewai/execution_context.py
new file mode 100644
index 000000000..7bad1fd2c
--- /dev/null
+++ b/lib/crewai/src/crewai/execution_context.py
@@ -0,0 +1,80 @@
+"""Checkpointable execution context for the crewAI runtime.
+
+Captures the ContextVar state needed to resume execution from a checkpoint.
+Used by the RootModel (step 5) to include execution context in snapshots.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+from crewai.context import (
+ _current_task_id,
+ _platform_integration_token,
+)
+from crewai.events.base_events import (
+ get_emission_sequence,
+ set_emission_counter,
+)
+from crewai.events.event_context import (
+ _event_id_stack,
+ _last_event_id,
+ _triggering_event_id,
+)
+from crewai.flow.flow_context import (
+ current_flow_id,
+ current_flow_method_name,
+ current_flow_request_id,
+)
+
+
+class ExecutionContext(BaseModel):
+ """Snapshot of ContextVar state required for checkpoint/resume."""
+
+ current_task_id: str | None = Field(default=None)
+ flow_request_id: str | None = Field(default=None)
+ flow_id: str | None = Field(default=None)
+ flow_method_name: str = Field(default="unknown")
+
+ event_id_stack: tuple[tuple[str, str], ...] = Field(default=())
+ last_event_id: str | None = Field(default=None)
+ triggering_event_id: str | None = Field(default=None)
+ emission_sequence: int = Field(default=0)
+
+ feedback_callback_info: dict[str, Any] | None = Field(default=None)
+ platform_token: str | None = Field(default=None)
+
+
+def capture_execution_context(
+ feedback_callback_info: dict[str, Any] | None = None,
+) -> ExecutionContext:
+ """Read all checkpoint-required ContextVars into an ExecutionContext."""
+ return ExecutionContext(
+ current_task_id=_current_task_id.get(),
+ flow_request_id=current_flow_request_id.get(),
+ flow_id=current_flow_id.get(),
+ flow_method_name=current_flow_method_name.get(),
+ event_id_stack=_event_id_stack.get(),
+ last_event_id=_last_event_id.get(),
+ triggering_event_id=_triggering_event_id.get(),
+ emission_sequence=get_emission_sequence(),
+ feedback_callback_info=feedback_callback_info,
+ platform_token=_platform_integration_token.get(),
+ )
+
+
+def apply_execution_context(ctx: ExecutionContext) -> None:
+ """Write an ExecutionContext back into the ContextVars."""
+ _current_task_id.set(ctx.current_task_id)
+ current_flow_request_id.set(ctx.flow_request_id)
+ current_flow_id.set(ctx.flow_id)
+ current_flow_method_name.set(ctx.flow_method_name)
+
+ _event_id_stack.set(ctx.event_id_stack)
+ _last_event_id.set(ctx.last_event_id)
+ _triggering_event_id.set(ctx.triggering_event_id)
+ set_emission_counter(ctx.emission_sequence)
+
+ _platform_integration_token.set(ctx.platform_token)
diff --git a/lib/crewai/src/crewai/task.py b/lib/crewai/src/crewai/task.py
index 38860352b..44d61729d 100644
--- a/lib/crewai/src/crewai/task.py
+++ b/lib/crewai/src/crewai/task.py
@@ -41,6 +41,7 @@ from crewai.events.types.task_events import (
TaskFailedEvent,
TaskStartedEvent,
)
+from crewai.llms.base_llm import BaseLLM
from crewai.security import Fingerprint, SecurityConfig
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
@@ -316,6 +317,10 @@ class Task(BaseModel):
if self.agent is None:
raise ValueError("Agent is required to use LLMGuardrail")
+ if not isinstance(self.agent.llm, BaseLLM):
+ raise ValueError(
+ "Agent must have a BaseLLM instance to use LLMGuardrail"
+ )
self._guardrail = cast(
GuardrailCallable,
LLMGuardrail(description=self.guardrail, llm=self.agent.llm),
@@ -339,6 +344,10 @@ class Task(BaseModel):
)
from crewai.tasks.llm_guardrail import LLMGuardrail
+ if not isinstance(self.agent.llm, BaseLLM):
+ raise ValueError(
+ "Agent must have a BaseLLM instance to use LLMGuardrail"
+ )
guardrails.append(
cast(
GuardrailCallable,
@@ -359,6 +368,10 @@ class Task(BaseModel):
)
from crewai.tasks.llm_guardrail import LLMGuardrail
+ if not isinstance(self.agent.llm, BaseLLM):
+ raise ValueError(
+ "Agent must have a BaseLLM instance to use LLMGuardrail"
+ )
guardrails.append(
cast(
GuardrailCallable,
@@ -646,7 +659,12 @@ class Task(BaseModel):
await cb_result
crew = self.agent.crew # type: ignore[union-attr]
- if crew and crew.task_callback and crew.task_callback != self.callback:
+ if (
+ crew
+ and not isinstance(crew, str)
+ and crew.task_callback
+ and crew.task_callback != self.callback
+ ):
cb_result = crew.task_callback(self.output)
if inspect.isawaitable(cb_result):
await cb_result
@@ -761,7 +779,12 @@ class Task(BaseModel):
asyncio.run(cb_result)
crew = self.agent.crew # type: ignore[union-attr]
- if crew and crew.task_callback and crew.task_callback != self.callback:
+ if (
+ crew
+ and not isinstance(crew, str)
+ and crew.task_callback
+ and crew.task_callback != self.callback
+ ):
cb_result = crew.task_callback(self.output)
if inspect.iscoroutine(cb_result):
asyncio.run(cb_result)
@@ -812,11 +835,14 @@ class Task(BaseModel):
if trigger_payload is not None:
description += f"\n\nTrigger Payload: {trigger_payload}"
- if self.agent and self.agent.crew:
+ if self.agent and self.agent.crew and not isinstance(self.agent.crew, str):
files = get_all_files(self.agent.crew.id, self.id)
if files:
supported_types: list[str] = []
- if self.agent.llm and self.agent.llm.supports_multimodal():
+ if (
+ isinstance(self.agent.llm, BaseLLM)
+ and self.agent.llm.supports_multimodal()
+ ):
provider: str = str(
getattr(self.agent.llm, "provider", None)
or getattr(self.agent.llm, "model", "openai")
diff --git a/lib/crewai/src/crewai/telemetry/telemetry.py b/lib/crewai/src/crewai/telemetry/telemetry.py
index ff4977254..ac25161bf 100644
--- a/lib/crewai/src/crewai/telemetry/telemetry.py
+++ b/lib/crewai/src/crewai/telemetry/telemetry.py
@@ -41,6 +41,7 @@ from crewai.events.types.system_events import (
SigTStpEvent,
SigTermEvent,
)
+from crewai.llms.base_llm import BaseLLM
from crewai.telemetry.constants import (
CREWAI_TELEMETRY_BASE_URL,
CREWAI_TELEMETRY_SERVICE_NAME,
@@ -323,7 +324,9 @@ class Telemetry:
if getattr(agent, "function_calling_llm", None)
else ""
),
- "llm": agent.llm.model,
+ "llm": agent.llm.model
+ if isinstance(agent.llm, BaseLLM)
+ else str(agent.llm),
"delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": getattr(
agent, "allow_code_execution", False
@@ -427,7 +430,9 @@ class Telemetry:
if getattr(agent, "function_calling_llm", None)
else ""
),
- "llm": agent.llm.model,
+ "llm": agent.llm.model
+ if isinstance(agent.llm, BaseLLM)
+ else str(agent.llm),
"delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": getattr(
agent, "allow_code_execution", False
@@ -840,7 +845,9 @@ class Telemetry:
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.prompt_file,
- "llm": agent.llm.model,
+ "llm": agent.llm.model
+ if isinstance(agent.llm, BaseLLM)
+ else str(agent.llm),
"delegation_enabled?": agent.allow_delegation,
"tools_names": [
sanitize_tool_name(tool.name)
diff --git a/lib/devtools/src/crewai_devtools/__init__.py b/lib/devtools/src/crewai_devtools/__init__.py
index 6e4f62e46..d878b722b 100644
--- a/lib/devtools/src/crewai_devtools/__init__.py
+++ b/lib/devtools/src/crewai_devtools/__init__.py
@@ -1,3 +1,3 @@
"""CrewAI development tools."""
-__version__ = "1.13.0a6"
+__version__ = "1.13.0a7"