refactor: reorganize agent executor imports and introduce CrewAgentExecutorFlow

- Replaced the import of CrewAgentExecutorFlow from crewai.agents.crew_agent_executor_flow with the new import from crewai.experimental.crew_agent_executor_flow.
- Updated relevant references in the codebase to ensure compatibility with the new structure.
- Enhanced the organization of imports in core.py and base_agent.py for better clarity and maintainability.
This commit is contained in:
lorenzejay
2025-12-10 09:40:00 -08:00
parent e70fbb8898
commit 563280cc9b
12 changed files with 62 additions and 26 deletions

View File

@@ -34,7 +34,6 @@ from crewai.agent.utils import (
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.cache.cache_handler import CacheHandler from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.crew_agent_executor import CrewAgentExecutor from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.agents.crew_agent_executor_flow import CrewAgentExecutorFlow
from crewai.events.event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.knowledge_events import ( from crewai.events.types.knowledge_events import (
KnowledgeQueryCompletedEvent, KnowledgeQueryCompletedEvent,
@@ -45,6 +44,7 @@ from crewai.events.types.memory_events import (
MemoryRetrievalCompletedEvent, MemoryRetrievalCompletedEvent,
MemoryRetrievalStartedEvent, MemoryRetrievalStartedEvent,
) )
from crewai.experimental.crew_agent_executor_flow import CrewAgentExecutorFlow
from crewai.knowledge.knowledge import Knowledge from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.lite_agent import LiteAgent from crewai.lite_agent import LiteAgent

View File

@@ -457,7 +457,6 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
if self.cache: if self.cache:
self.cache_handler = cache_handler self.cache_handler = cache_handler
self.tools_handler.cache = cache_handler self.tools_handler.cache = cache_handler
# TODO: we should do if agent_executor, then we update as we were re-creating the agent_executor which is not ideal
def set_rpm_controller(self, rpm_controller: RPMController) -> None: def set_rpm_controller(self, rpm_controller: RPMController) -> None:
"""Set the rpm controller for the agent. """Set the rpm controller for the agent.
@@ -467,7 +466,6 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
""" """
if not self._rpm_controller: if not self._rpm_controller:
self._rpm_controller = rpm_controller self._rpm_controller = rpm_controller
# TODO: we should do if agent_executor, then we update as we were re-creating the agent_executor which is not ideal
def set_knowledge(self, crew_embedder: EmbedderConfig | None = None) -> None: def set_knowledge(self, crew_embedder: EmbedderConfig | None = None) -> None:
pass pass

View File

@@ -1,3 +1,4 @@
from crewai.experimental.crew_agent_executor_flow import CrewAgentExecutorFlow
from crewai.experimental.evaluation import ( from crewai.experimental.evaluation import (
AgentEvaluationResult, AgentEvaluationResult,
AgentEvaluator, AgentEvaluator,
@@ -23,6 +24,7 @@ __all__ = [
"AgentEvaluationResult", "AgentEvaluationResult",
"AgentEvaluator", "AgentEvaluator",
"BaseEvaluator", "BaseEvaluator",
"CrewAgentExecutorFlow",
"EvaluationScore", "EvaluationScore",
"EvaluationTraceCallback", "EvaluationTraceCallback",
"ExperimentResult", "ExperimentResult",

View File

@@ -1,8 +1,9 @@
from __future__ import annotations
from collections.abc import Sequence from collections.abc import Sequence
import threading import threading
from typing import Any from typing import TYPE_CHECKING, Any
from crewai.agent.core import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.events.event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.agent_events import ( from crewai.events.types.agent_events import (
@@ -28,6 +29,10 @@ from crewai.experimental.evaluation.evaluation_listener import (
from crewai.task import Task from crewai.task import Task
if TYPE_CHECKING:
from crewai.agent import Agent
class ExecutionState: class ExecutionState:
current_agent_id: str | None = None current_agent_id: str | None = None
current_task_id: str | None = None current_task_id: str | None = None

View File

@@ -1,17 +1,22 @@
from __future__ import annotations
import abc import abc
import enum import enum
from enum import Enum from enum import Enum
from typing import Any from typing import TYPE_CHECKING, Any
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.llm import BaseLLM from crewai.llm import BaseLLM
from crewai.task import Task from crewai.task import Task
from crewai.utilities.llm_utils import create_llm from crewai.utilities.llm_utils import create_llm
if TYPE_CHECKING:
from crewai.agent import Agent
class MetricCategory(enum.Enum): class MetricCategory(enum.Enum):
GOAL_ALIGNMENT = "goal_alignment" GOAL_ALIGNMENT = "goal_alignment"
SEMANTIC_QUALITY = "semantic_quality" SEMANTIC_QUALITY = "semantic_quality"

View File

@@ -1,8 +1,9 @@
from __future__ import annotations
from collections import defaultdict from collections import defaultdict
from hashlib import md5 from hashlib import md5
from typing import Any from typing import TYPE_CHECKING, Any
from crewai import Agent, Crew
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.experimental.evaluation import AgentEvaluator, create_default_evaluator from crewai.experimental.evaluation import AgentEvaluator, create_default_evaluator
from crewai.experimental.evaluation.evaluation_display import ( from crewai.experimental.evaluation.evaluation_display import (
@@ -17,6 +18,11 @@ from crewai.experimental.evaluation.experiment.result_display import (
) )
if TYPE_CHECKING:
from crewai.agent import Agent
from crewai.crew import Crew
class ExperimentRunner: class ExperimentRunner:
def __init__(self, dataset: list[dict[str, Any]]): def __init__(self, dataset: list[dict[str, Any]]):
self.dataset = dataset or [] self.dataset = dataset or []

View File

@@ -1,6 +1,7 @@
from typing import Any from __future__ import annotations
from typing import TYPE_CHECKING, Any
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.experimental.evaluation.base_evaluator import ( from crewai.experimental.evaluation.base_evaluator import (
BaseEvaluator, BaseEvaluator,
@@ -12,6 +13,10 @@ from crewai.task import Task
from crewai.utilities.types import LLMMessage from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.agent import Agent
class GoalAlignmentEvaluator(BaseEvaluator): class GoalAlignmentEvaluator(BaseEvaluator):
@property @property
def metric_category(self) -> MetricCategory: def metric_category(self) -> MetricCategory:

View File

@@ -6,15 +6,16 @@ This module provides evaluator implementations for:
- Thinking-to-action ratio - Thinking-to-action ratio
""" """
from __future__ import annotations
from collections.abc import Sequence from collections.abc import Sequence
from enum import Enum from enum import Enum
import logging import logging
import re import re
from typing import Any from typing import TYPE_CHECKING, Any
import numpy as np import numpy as np
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.experimental.evaluation.base_evaluator import ( from crewai.experimental.evaluation.base_evaluator import (
BaseEvaluator, BaseEvaluator,
@@ -27,6 +28,10 @@ from crewai.tasks.task_output import TaskOutput
from crewai.utilities.types import LLMMessage from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.agent import Agent
class ReasoningPatternType(Enum): class ReasoningPatternType(Enum):
EFFICIENT = "efficient" # Good reasoning flow EFFICIENT = "efficient" # Good reasoning flow
LOOP = "loop" # Agent is stuck in a loop LOOP = "loop" # Agent is stuck in a loop

View File

@@ -1,6 +1,7 @@
from typing import Any from __future__ import annotations
from typing import TYPE_CHECKING, Any
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.experimental.evaluation.base_evaluator import ( from crewai.experimental.evaluation.base_evaluator import (
BaseEvaluator, BaseEvaluator,
@@ -12,6 +13,10 @@ from crewai.task import Task
from crewai.utilities.types import LLMMessage from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.agent import Agent
class SemanticQualityEvaluator(BaseEvaluator): class SemanticQualityEvaluator(BaseEvaluator):
@property @property
def metric_category(self) -> MetricCategory: def metric_category(self) -> MetricCategory:

View File

@@ -1,7 +1,8 @@
import json from __future__ import annotations
from typing import Any
import json
from typing import TYPE_CHECKING, Any
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.experimental.evaluation.base_evaluator import ( from crewai.experimental.evaluation.base_evaluator import (
BaseEvaluator, BaseEvaluator,
@@ -13,6 +14,10 @@ from crewai.task import Task
from crewai.utilities.types import LLMMessage from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.agent import Agent
class ToolSelectionEvaluator(BaseEvaluator): class ToolSelectionEvaluator(BaseEvaluator):
@property @property
def metric_category(self) -> MetricCategory: def metric_category(self) -> MetricCategory:

View File

@@ -8,7 +8,7 @@ from unittest.mock import Mock, patch
import pytest import pytest
from crewai.agents.crew_agent_executor_flow import ( from crewai.experimental.crew_agent_executor_flow import (
AgentReActState, AgentReActState,
CrewAgentExecutorFlow, CrewAgentExecutorFlow,
) )
@@ -233,7 +233,7 @@ class TestCrewAgentExecutorFlow:
AgentFinish(thought="thinking", output="test", text="final") AgentFinish(thought="thinking", output="test", text="final")
) )
@patch("crewai.agents.crew_agent_executor_flow.handle_output_parser_exception") @patch("crewai.experimental.crew_agent_executor_flow.handle_output_parser_exception")
def test_recover_from_parser_error( def test_recover_from_parser_error(
self, mock_handle_exception, mock_dependencies self, mock_handle_exception, mock_dependencies
): ):
@@ -252,7 +252,7 @@ class TestCrewAgentExecutorFlow:
assert executor.state.iterations == initial_iterations + 1 assert executor.state.iterations == initial_iterations + 1
mock_handle_exception.assert_called_once() mock_handle_exception.assert_called_once()
@patch("crewai.agents.crew_agent_executor_flow.handle_context_length") @patch("crewai.experimental.crew_agent_executor_flow.handle_context_length")
def test_recover_from_context_length( def test_recover_from_context_length(
self, mock_handle_context, mock_dependencies self, mock_handle_context, mock_dependencies
): ):
@@ -321,8 +321,8 @@ class TestFlowErrorHandling:
"tools_handler": Mock(), "tools_handler": Mock(),
} }
@patch("crewai.agents.crew_agent_executor_flow.get_llm_response") @patch("crewai.experimental.crew_agent_executor_flow.get_llm_response")
@patch("crewai.agents.crew_agent_executor_flow.enforce_rpm_limit") @patch("crewai.experimental.crew_agent_executor_flow.enforce_rpm_limit")
def test_call_llm_parser_error( def test_call_llm_parser_error(
self, mock_enforce_rpm, mock_get_llm, mock_dependencies self, mock_enforce_rpm, mock_get_llm, mock_dependencies
): ):
@@ -338,9 +338,9 @@ class TestFlowErrorHandling:
assert result == "parser_error" assert result == "parser_error"
assert executor._last_parser_error is not None assert executor._last_parser_error is not None
@patch("crewai.agents.crew_agent_executor_flow.get_llm_response") @patch("crewai.experimental.crew_agent_executor_flow.get_llm_response")
@patch("crewai.agents.crew_agent_executor_flow.enforce_rpm_limit") @patch("crewai.experimental.crew_agent_executor_flow.enforce_rpm_limit")
@patch("crewai.agents.crew_agent_executor_flow.is_context_length_exceeded") @patch("crewai.experimental.crew_agent_executor_flow.is_context_length_exceeded")
def test_call_llm_context_error( def test_call_llm_context_error(
self, self,
mock_is_context_exceeded, mock_is_context_exceeded,