From 0029f8193c7bf7af25975137d4aa8688a568c61b Mon Sep 17 00:00:00 2001
From: lorenzejay
Date: Fri, 9 Jan 2026 14:42:50 -0800
Subject: [PATCH] wip: restructuring agent executor and LiteAgent

---
 lib/crewai/src/crewai/agent/core.py           | 319 +++++++++++++++---
 .../base_agent_executor_mixin.py              |   4 +-
 .../src/crewai/experimental/__init__.py       |   5 +-
 ...ent_executor_flow.py => agent_executor.py} |  32 +-
 lib/crewai/src/crewai/lite_agent.py           |  18 +
 ...xecutor_flow.py => test_agent_executor.py} |  98 +++---
 6 files changed, 367 insertions(+), 109 deletions(-)
 rename lib/crewai/src/crewai/experimental/{crew_agent_executor_flow.py => agent_executor.py} (96%)
 rename lib/crewai/tests/agents/{test_crew_agent_executor_flow.py => test_agent_executor.py} (82%)

diff --git a/lib/crewai/src/crewai/agent/core.py b/lib/crewai/src/crewai/agent/core.py
index d06b3b6f7..8464eea66 100644
--- a/lib/crewai/src/crewai/agent/core.py
+++ b/lib/crewai/src/crewai/agent/core.py
@@ -35,6 +35,11 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.cache.cache_handler import CacheHandler
 from crewai.agents.crew_agent_executor import CrewAgentExecutor
 from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.agent_events import (
+    LiteAgentExecutionCompletedEvent,
+    LiteAgentExecutionErrorEvent,
+    LiteAgentExecutionStartedEvent,
+)
 from crewai.events.types.knowledge_events import (
     KnowledgeQueryCompletedEvent,
     KnowledgeQueryFailedEvent,
@@ -44,10 +49,10 @@ from crewai.events.types.memory_events import (
     MemoryRetrievalCompletedEvent,
     MemoryRetrievalStartedEvent,
 )
-from crewai.experimental.crew_agent_executor_flow import CrewAgentExecutorFlow
+from crewai.experimental.agent_executor import AgentExecutor
 from crewai.knowledge.knowledge import Knowledge
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
-from crewai.lite_agent import LiteAgent
+from crewai.lite_agent_output import LiteAgentOutput
 from crewai.llms.base_llm import BaseLLM
 from crewai.mcp import (
     MCPClient,
@@ -70,10 +75,12 @@ from crewai.utilities.agent_utils import (
     render_text_description_and_args,
 )
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
-from crewai.utilities.converter import Converter
+from crewai.utilities.converter import Converter, ConverterError
+from crewai.utilities.guardrail import process_guardrail
 from crewai.utilities.guardrail_types import GuardrailType
 from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.prompts import Prompts
+from crewai.utilities.pydantic_schema_utils import generate_model_description
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.training_handler import CrewTrainingHandler

@@ -82,7 +89,6 @@ if TYPE_CHECKING:
     from crewai_tools import CodeInterpreterTool

     from crewai.agents.agent_builder.base_agent import PlatformAppOrAction
-    from crewai.lite_agent_output import LiteAgentOutput
     from crewai.task import Task
     from crewai.tools.base_tool import BaseTool
     from crewai.utilities.types import LLMMessage
@@ -106,7 +112,7 @@ class Agent(BaseAgent):
     The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.

     Attributes:
-        agent_executor: An instance of the CrewAgentExecutor or CrewAgentExecutorFlow class.
+        agent_executor: An instance of the CrewAgentExecutor or AgentExecutor class.
         role: The role of the agent.
         goal: The objective of the agent.
         backstory: The backstory of the agent.
@@ -222,9 +228,9 @@ class Agent(BaseAgent):
         default=None,
         description="A2A (Agent-to-Agent) configuration for delegating tasks to remote agents. Can be a single A2AConfig or a dict mapping agent IDs to configs.",
     )
-    executor_class: type[CrewAgentExecutor] | type[CrewAgentExecutorFlow] = Field(
+    executor_class: type[CrewAgentExecutor] | type[AgentExecutor] = Field(
         default=CrewAgentExecutor,
-        description="Class to use for the agent executor. Defaults to CrewAgentExecutor, can optionally use CrewAgentExecutorFlow.",
+        description="Class to use for the agent executor. Defaults to CrewAgentExecutor; can optionally use AgentExecutor.",
     )

     @model_validator(mode="before")
@@ -1573,10 +1579,10 @@ class Agent(BaseAgent):
         response_format: type[Any] | None = None,
     ) -> LiteAgentOutput:
         """
-        Execute the agent with the given messages using a LiteAgent instance.
+        Execute the agent with the given messages using the AgentExecutor.

-        This method is useful when you want to use the Agent configuration but
-        with the simpler and more direct execution flow of LiteAgent.
+        This method provides standalone agent execution without requiring a Crew.
+        It supports tools, response formatting, and guardrails.

         Args:
             messages: Either a string query or a list of message dictionaries.
@@ -1587,6 +1593,7 @@ class Agent(BaseAgent):
         Returns:
             LiteAgentOutput: The result of the agent execution.
         """
+        # Process platform apps and MCP tools
         if self.apps:
            platform_tools = self.get_platform_tools(self.apps)
            if platform_tools and self.tools is not None:
                self.tools.extend(platform_tools)
@@ -1596,25 +1603,264 @@ class Agent(BaseAgent):
         if mcps and self.tools is not None:
             self.tools.extend(mcps)

-        lite_agent = LiteAgent(
-            id=self.id,
-            role=self.role,
-            goal=self.goal,
-            backstory=self.backstory,
-            llm=self.llm,
-            tools=self.tools or [],
-            max_iterations=self.max_iter,
-            max_execution_time=self.max_execution_time,
-            respect_context_window=self.respect_context_window,
-            verbose=self.verbose,
-            response_format=response_format,
+        # Prepare tools
+        raw_tools: list[BaseTool] = self.tools or []
+        parsed_tools = parse_tools(raw_tools)
+
+        # Build agent_info for backward-compatible event emission
+        agent_info = {
+            "id": self.id,
+            "role": self.role,
+            "goal": self.goal,
+            "backstory": self.backstory,
+            "tools": raw_tools,
+            "verbose": self.verbose,
+        }
+
+        # Build prompt for standalone execution
+        prompt = Prompts(
+            agent=self,
+            has_tools=len(raw_tools) > 0,
             i18n=self.i18n,
-            original_agent=self,
-            guardrail=self.guardrail,
-            guardrail_max_retries=self.guardrail_max_retries,
+            use_system_prompt=self.use_system_prompt,
+            system_template=self.system_template,
+            prompt_template=self.prompt_template,
+            response_template=self.response_template,
+        ).task_execution()
+
+        # Prepare stop words
+        stop_words = [self.i18n.slice("observation")]
+        if self.response_template:
+            stop_words.append(
+                self.response_template.split("{{ .Response }}")[1].strip()
+            )
+
+        # Get RPM limit function
+        rpm_limit_fn = (
+            self._rpm_controller.check_or_wait if self._rpm_controller else None
         )
-        return lite_agent.kickoff(messages)
+
+        # Create the executor for standalone mode (no crew, no task)
+        executor = AgentExecutor(
+            llm=cast(BaseLLM, self.llm),
+            agent=self,
+            prompt=prompt,
+            max_iter=self.max_iter,
+            tools=parsed_tools,
+            tools_names=get_tool_names(parsed_tools),
+            stop_words=stop_words,
+            tools_description=render_text_description_and_args(parsed_tools),
+            tools_handler=self.tools_handler,
+            task=None,  # Standalone mode
+            crew=None,  # Standalone mode
+            original_tools=raw_tools,
+            step_callback=self.step_callback,
+            function_calling_llm=self.function_calling_llm,
+            respect_context_window=self.respect_context_window,
+            request_within_rpm_limit=rpm_limit_fn,
+            callbacks=[TokenCalcHandler(self._token_process)],
+            response_model=response_format,
+            i18n=self.i18n,
+        )
+
+        # Format messages for the executor
+        if isinstance(messages, str):
+            formatted_messages = messages
+        else:
+            # Convert list of messages to a single input string
+            formatted_messages = "\n".join(
+                str(msg.get("content", "")) for msg in messages if msg.get("content")
+            )
+
+        # Build the input dict for the executor
+        inputs = {
+            "input": formatted_messages,
+            "tool_names": get_tool_names(parsed_tools),
+            "tools": render_text_description_and_args(parsed_tools),
+        }
+
+        try:
+            # Emit started event for backward compatibility with LiteAgent listeners
+            crewai_event_bus.emit(
+                self,
+                event=LiteAgentExecutionStartedEvent(
+                    agent_info=agent_info,
+                    tools=parsed_tools,
+                    messages=messages,
+                ),
+            )
+
+            # Execute and build output
+            output = self._execute_and_build_output(executor, inputs, response_format)
+
+            # Process guardrail if configured
+            if self.guardrail is not None:
+                output = self._process_kickoff_guardrail(
+                    output=output,
+                    executor=executor,
+                    inputs=inputs,
+                    response_format=response_format,
+                )
+
+            # Emit completed event for backward compatibility with LiteAgent listeners
+            crewai_event_bus.emit(
+                self,
+                event=LiteAgentExecutionCompletedEvent(
+                    agent_info=agent_info,
+                    output=output.raw,
+                ),
+            )
+
+            return output
+
+        except Exception as e:
+            # Emit error event for backward compatibility with LiteAgent listeners
+            crewai_event_bus.emit(
+                self,
+                event=LiteAgentExecutionErrorEvent(
+                    agent_info=agent_info,
+                    error=str(e),
+                ),
+            )
+            raise
+
+    def _execute_and_build_output(
+        self,
+        executor: AgentExecutor,
+        inputs: dict[str, str],
+        response_format: type[Any] | None = None,
+    ) -> LiteAgentOutput:
+        """Execute the agent and build the output object.
+
+        Args:
+            executor: The executor instance.
+            inputs: Input dictionary for execution.
+            response_format: Optional response format.
+
+        Returns:
+            LiteAgentOutput with raw output, formatted result, and metrics.
+ """ + import json + + # Execute the agent + result = executor.invoke(inputs) + raw_output = result.get("output", "") + + # Handle response format conversion + formatted_result: BaseModel | None = None + if response_format: + try: + model_schema = generate_model_description(response_format) + schema = json.dumps(model_schema, indent=2) + instructions = self.i18n.slice("formatted_task_instructions").format( + output_format=schema + ) + + converter = Converter( + llm=self.llm, + text=raw_output, + model=response_format, + instructions=instructions, + ) + + conversion_result = converter.to_pydantic() + if isinstance(conversion_result, BaseModel): + formatted_result = conversion_result + except ConverterError: + pass # Keep raw output if conversion fails + + # Get token usage metrics + if isinstance(self.llm, BaseLLM): + usage_metrics = self.llm.get_token_usage_summary() + else: + usage_metrics = self._token_process.get_summary() + + return LiteAgentOutput( + raw=raw_output, + pydantic=formatted_result, + agent_role=self.role, + usage_metrics=usage_metrics.model_dump() if usage_metrics else None, + messages=executor.messages, + ) + + def _process_kickoff_guardrail( + self, + output: LiteAgentOutput, + executor: AgentExecutor, + inputs: dict[str, str], + response_format: type[Any] | None = None, + retry_count: int = 0, + ) -> LiteAgentOutput: + """Process guardrail for kickoff execution with retry logic. + + Args: + output: Current agent output. + executor: The executor instance. + inputs: Input dictionary for re-execution. + response_format: Optional response format. + retry_count: Current retry count. + + Returns: + Validated/updated output. + """ + from crewai.utilities.guardrail_types import GuardrailCallable + + # Ensure guardrail is callable + guardrail_callable: GuardrailCallable + if isinstance(self.guardrail, str): + from crewai.tasks.llm_guardrail import LLMGuardrail + + guardrail_callable = cast( + GuardrailCallable, + LLMGuardrail(description=self.guardrail, llm=cast(BaseLLM, self.llm)), + ) + elif callable(self.guardrail): + guardrail_callable = self.guardrail + else: + # Should not happen if called from kickoff with guardrail check + return output + + guardrail_result = process_guardrail( + output=output, + guardrail=guardrail_callable, + retry_count=retry_count, + event_source=self, + from_agent=self, + ) + + if not guardrail_result.success: + if retry_count >= self.guardrail_max_retries: + raise ValueError( + f"Agent's guardrail failed validation after {self.guardrail_max_retries} retries. 
" + f"Last error: {guardrail_result.error}" + ) + + # Add feedback and re-execute + executor._append_message_to_state( + guardrail_result.error or "Guardrail validation failed", + role="user", + ) + + # Re-execute and build new output + output = self._execute_and_build_output(executor, inputs, response_format) + + # Recursively retry guardrail + return self._process_kickoff_guardrail( + output=output, + executor=executor, + inputs=inputs, + response_format=response_format, + retry_count=retry_count + 1, + ) + + # Apply guardrail result if available + if guardrail_result.result is not None: + if isinstance(guardrail_result.result, str): + output.raw = guardrail_result.result + elif isinstance(guardrail_result.result, BaseModel): + output.pydantic = guardrail_result.result + + return output async def kickoff_async( self, @@ -1622,7 +1868,7 @@ class Agent(BaseAgent): response_format: type[Any] | None = None, ) -> LiteAgentOutput: """ - Execute the agent asynchronously with the given messages using a LiteAgent instance. + Execute the agent asynchronously with the given messages. This is the async version of the kickoff method. @@ -1635,21 +1881,4 @@ class Agent(BaseAgent): Returns: LiteAgentOutput: The result of the agent execution. """ - lite_agent = LiteAgent( - role=self.role, - goal=self.goal, - backstory=self.backstory, - llm=self.llm, - tools=self.tools or [], - max_iterations=self.max_iter, - max_execution_time=self.max_execution_time, - respect_context_window=self.respect_context_window, - verbose=self.verbose, - response_format=response_format, - i18n=self.i18n, - original_agent=self, - guardrail=self.guardrail, - guardrail_max_retries=self.guardrail_max_retries, - ) - - return await lite_agent.kickoff_async(messages) + return await asyncio.to_thread(self.kickoff, messages, response_format) diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py index cc4c9d4d8..e54742255 100644 --- a/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py +++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py @@ -21,9 +21,9 @@ if TYPE_CHECKING: class CrewAgentExecutorMixin: - crew: Crew + crew: Crew | None agent: Agent - task: Task + task: Task | None iterations: int max_iter: int messages: list[LLMMessage] diff --git a/lib/crewai/src/crewai/experimental/__init__.py b/lib/crewai/src/crewai/experimental/__init__.py index 9507095ff..662a722f3 100644 --- a/lib/crewai/src/crewai/experimental/__init__.py +++ b/lib/crewai/src/crewai/experimental/__init__.py @@ -1,4 +1,4 @@ -from crewai.experimental.crew_agent_executor_flow import CrewAgentExecutorFlow +from crewai.experimental.agent_executor import AgentExecutor, CrewAgentExecutorFlow from crewai.experimental.evaluation import ( AgentEvaluationResult, AgentEvaluator, @@ -23,8 +23,9 @@ from crewai.experimental.evaluation import ( __all__ = [ "AgentEvaluationResult", "AgentEvaluator", + "AgentExecutor", "BaseEvaluator", - "CrewAgentExecutorFlow", + "CrewAgentExecutorFlow", # Deprecated alias for AgentExecutor "EvaluationScore", "EvaluationTraceCallback", "ExperimentResult", diff --git a/lib/crewai/src/crewai/experimental/crew_agent_executor_flow.py b/lib/crewai/src/crewai/experimental/agent_executor.py similarity index 96% rename from lib/crewai/src/crewai/experimental/crew_agent_executor_flow.py rename to lib/crewai/src/crewai/experimental/agent_executor.py index 7111c97ab..ee8a14595 100644 --- 
diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py
index cc4c9d4d8..e54742255 100644
--- a/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py
+++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py
@@ -21,9 +21,9 @@ if TYPE_CHECKING:


 class CrewAgentExecutorMixin:
-    crew: Crew
+    crew: Crew | None
     agent: Agent
-    task: Task
+    task: Task | None
     iterations: int
     max_iter: int
     messages: list[LLMMessage]
diff --git a/lib/crewai/src/crewai/experimental/__init__.py b/lib/crewai/src/crewai/experimental/__init__.py
index 9507095ff..662a722f3 100644
--- a/lib/crewai/src/crewai/experimental/__init__.py
+++ b/lib/crewai/src/crewai/experimental/__init__.py
@@ -1,4 +1,4 @@
-from crewai.experimental.crew_agent_executor_flow import CrewAgentExecutorFlow
+from crewai.experimental.agent_executor import AgentExecutor, CrewAgentExecutorFlow
 from crewai.experimental.evaluation import (
     AgentEvaluationResult,
     AgentEvaluator,
@@ -23,8 +23,9 @@ from crewai.experimental.evaluation import (
 __all__ = [
     "AgentEvaluationResult",
     "AgentEvaluator",
+    "AgentExecutor",
     "BaseEvaluator",
-    "CrewAgentExecutorFlow",
+    "CrewAgentExecutorFlow",  # Deprecated alias for AgentExecutor
     "EvaluationScore",
     "EvaluationTraceCallback",
     "ExperimentResult",
diff --git a/lib/crewai/src/crewai/experimental/crew_agent_executor_flow.py b/lib/crewai/src/crewai/experimental/agent_executor.py
similarity index 96%
rename from lib/crewai/src/crewai/experimental/crew_agent_executor_flow.py
rename to lib/crewai/src/crewai/experimental/agent_executor.py
index 7111c97ab..ee8a14595 100644
--- a/lib/crewai/src/crewai/experimental/crew_agent_executor_flow.py
+++ b/lib/crewai/src/crewai/experimental/agent_executor.py
@@ -73,13 +73,17 @@ class AgentReActState(BaseModel):
     ask_for_human_input: bool = Field(default=False)


-class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
-    """Flow-based executor matching CrewAgentExecutor interface.
+class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
+    """Flow-based agent executor for both standalone and crew-bound execution.

     Inherits from:
     - Flow[AgentReActState]: Provides flow orchestration capabilities
     - CrewAgentExecutorMixin: Provides memory methods (short/long/external term)

+    This executor can operate in two modes:
+    - Standalone mode: When crew and task are None (used by Agent.kickoff())
+    - Crew mode: When crew and task are provided (used by Agent.execute_task())
+
     Note: Multiple instances may be created during agent initialization
     (cache setup, RPM controller setup, etc.) but only the final instance
     should execute tasks via invoke().
@@ -88,8 +92,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
     def __init__(
         self,
         llm: BaseLLM,
-        task: Task,
-        crew: Crew,
         agent: Agent,
         prompt: SystemPromptResult | StandardPromptResult,
         max_iter: int,
@@ -98,6 +100,8 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
         stop_words: list[str],
         tools_description: str,
         tools_handler: ToolsHandler,
+        task: Task | None = None,
+        crew: Crew | None = None,
         step_callback: Any = None,
         original_tools: list[BaseTool] | None = None,
         function_calling_llm: BaseLLM | Any | None = None,
@@ -111,8 +115,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):

         Args:
             llm: Language model instance.
-            task: Task to execute.
-            crew: Crew instance.
             agent: Agent to execute.
             prompt: Prompt templates.
             max_iter: Maximum iterations.
@@ -121,6 +123,8 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
             stop_words: Stop word list.
             tools_description: Tool descriptions.
             tools_handler: Tool handler instance.
+            task: Optional task to execute (None for standalone agent execution).
+            crew: Optional crew instance (None for standalone agent execution).
             step_callback: Optional step callback.
             original_tools: Original tool list.
             function_calling_llm: Optional function calling LLM.
@@ -131,9 +135,9 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
         """
         self._i18n: I18N = i18n or get_i18n()
         self.llm = llm
-        self.task = task
+        self.task: Task | None = task
         self.agent = agent
-        self.crew = crew
+        self.crew: Crew | None = crew
         self.prompt = prompt
         self.tools = tools
         self.tools_names = tools_names
@@ -621,10 +625,12 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
             result: Agent's final output.
             human_feedback: Optional feedback from human.
         """
+        # Early return if no crew (standalone mode)
+        if self.crew is None:
+            return
+
         agent_id = str(self.agent.id)
-        train_iteration = (
-            getattr(self.crew, "_train_iteration", None) if self.crew else None
-        )
+        train_iteration = getattr(self.crew, "_train_iteration", None)

         if train_iteration is None or not isinstance(train_iteration, int):
             train_error = Text()
@@ -806,3 +812,7 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
         requiring arbitrary_types_allowed=True.
         """
         return core_schema.any_schema()
+
+
+# Backward compatibility alias (deprecated)
+CrewAgentExecutorFlow = AgentExecutor
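A small sketch of what the deprecated alias above preserves; both names resolve to the same class, so existing imports keep working:

    from crewai.experimental import AgentExecutor, CrewAgentExecutorFlow

    # CrewAgentExecutorFlow is now only an alias for AgentExecutor.
    assert CrewAgentExecutorFlow is AgentExecutor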
""" return core_schema.any_schema() + + +# Backward compatibility alias (deprecated) +CrewAgentExecutorFlow = AgentExecutor diff --git a/lib/crewai/src/crewai/lite_agent.py b/lib/crewai/src/crewai/lite_agent.py index 9bb3193e5..b59f230a7 100644 --- a/lib/crewai/src/crewai/lite_agent.py +++ b/lib/crewai/src/crewai/lite_agent.py @@ -10,6 +10,7 @@ from typing import ( get_origin, ) import uuid +import warnings from pydantic import ( UUID4, @@ -80,6 +81,11 @@ class LiteAgent(FlowTrackable, BaseModel): """ A lightweight agent that can process messages and use tools. + .. deprecated:: + LiteAgent is deprecated and will be removed in a future version. + Use ``Agent().kickoff(messages)`` instead, which provides the same + functionality with additional features like memory and knowledge support. + This agent is simpler than the full Agent class, focusing on direct execution rather than task delegation. It's designed to be used for simple interactions where a full crew is not needed. @@ -164,6 +170,18 @@ class LiteAgent(FlowTrackable, BaseModel): default_factory=get_after_llm_call_hooks ) + @model_validator(mode="after") + def emit_deprecation_warning(self) -> Self: + """Emit deprecation warning for LiteAgent usage.""" + warnings.warn( + "LiteAgent is deprecated and will be removed in a future version. " + "Use Agent().kickoff(messages) instead, which provides the same " + "functionality with additional features like memory and knowledge support.", + DeprecationWarning, + stacklevel=2, + ) + return self + @model_validator(mode="after") def setup_llm(self) -> Self: """Set up the LLM and other components after initialization.""" diff --git a/lib/crewai/tests/agents/test_crew_agent_executor_flow.py b/lib/crewai/tests/agents/test_agent_executor.py similarity index 82% rename from lib/crewai/tests/agents/test_crew_agent_executor_flow.py rename to lib/crewai/tests/agents/test_agent_executor.py index 36fd887eb..8560d9321 100644 --- a/lib/crewai/tests/agents/test_crew_agent_executor_flow.py +++ b/lib/crewai/tests/agents/test_agent_executor.py @@ -1,4 +1,4 @@ -"""Unit tests for CrewAgentExecutorFlow. +"""Unit tests for AgentExecutor. Tests the Flow-based agent executor implementation including state management, flow methods, routing logic, and error handling. 
diff --git a/lib/crewai/tests/agents/test_crew_agent_executor_flow.py b/lib/crewai/tests/agents/test_agent_executor.py
similarity index 82%
rename from lib/crewai/tests/agents/test_crew_agent_executor_flow.py
rename to lib/crewai/tests/agents/test_agent_executor.py
index 36fd887eb..8560d9321 100644
--- a/lib/crewai/tests/agents/test_crew_agent_executor_flow.py
+++ b/lib/crewai/tests/agents/test_agent_executor.py
@@ -1,4 +1,4 @@
-"""Unit tests for CrewAgentExecutorFlow.
+"""Unit tests for AgentExecutor.

 Tests the Flow-based agent executor implementation including state management,
 flow methods, routing logic, and error handling.
@@ -8,9 +8,9 @@ from unittest.mock import Mock, patch

 import pytest

-from crewai.experimental.crew_agent_executor_flow import (
+from crewai.experimental.agent_executor import (
     AgentReActState,
-    CrewAgentExecutorFlow,
+    AgentExecutor,
 )
 from crewai.agents.parser import AgentAction, AgentFinish
@@ -43,8 +43,8 @@ class TestAgentReActState:
         assert state.ask_for_human_input is True


-class TestCrewAgentExecutorFlow:
-    """Test CrewAgentExecutorFlow class."""
+class TestAgentExecutor:
+    """Test AgentExecutor class."""

     @pytest.fixture
     def mock_dependencies(self):
@@ -87,8 +87,8 @@ class TestCrewAgentExecutorFlow:
         }

     def test_executor_initialization(self, mock_dependencies):
-        """Test CrewAgentExecutorFlow initialization."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        """Test AgentExecutor initialization."""
+        executor = AgentExecutor(**mock_dependencies)

         assert executor.llm == mock_dependencies["llm"]
         assert executor.task == mock_dependencies["task"]
@@ -100,9 +100,9 @@ class TestCrewAgentExecutorFlow:
     def test_initialize_reasoning(self, mock_dependencies):
         """Test flow entry point."""
         with patch.object(
-            CrewAgentExecutorFlow, "_show_start_logs"
+            AgentExecutor, "_show_start_logs"
         ) as mock_show_start:
-            executor = CrewAgentExecutorFlow(**mock_dependencies)
+            executor = AgentExecutor(**mock_dependencies)
             result = executor.initialize_reasoning()

             assert result == "initialized"
@@ -110,7 +110,7 @@ class TestCrewAgentExecutorFlow:

     def test_check_max_iterations_not_reached(self, mock_dependencies):
         """Test routing when iterations < max."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
         executor.state.iterations = 5

         result = executor.check_max_iterations()
@@ -118,7 +118,7 @@ class TestCrewAgentExecutorFlow:

     def test_check_max_iterations_reached(self, mock_dependencies):
         """Test routing when iterations >= max."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
         executor.state.iterations = 10

         result = executor.check_max_iterations()
@@ -126,7 +126,7 @@ class TestCrewAgentExecutorFlow:

     def test_route_by_answer_type_action(self, mock_dependencies):
         """Test routing for AgentAction."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
         executor.state.current_answer = AgentAction(
             thought="thinking", tool="search", tool_input="query", text="action text"
         )
@@ -136,7 +136,7 @@ class TestCrewAgentExecutorFlow:

     def test_route_by_answer_type_finish(self, mock_dependencies):
         """Test routing for AgentFinish."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
         executor.state.current_answer = AgentFinish(
             thought="final thoughts", output="Final answer", text="complete"
         )
@@ -146,7 +146,7 @@ class TestCrewAgentExecutorFlow:

     def test_continue_iteration(self, mock_dependencies):
         """Test iteration continuation."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)

         result = executor.continue_iteration()

@@ -154,8 +154,8 @@ class TestCrewAgentExecutorFlow:

     def test_finalize_success(self, mock_dependencies):
         """Test finalize with valid AgentFinish."""
-        with patch.object(CrewAgentExecutorFlow, "_show_logs") as mock_show_logs:
-            executor = CrewAgentExecutorFlow(**mock_dependencies)
+        with patch.object(AgentExecutor, "_show_logs") as mock_show_logs:
+            executor = AgentExecutor(**mock_dependencies)
             executor.state.current_answer = AgentFinish(
thought="final thinking", output="Done", text="complete" ) @@ -168,7 +168,7 @@ class TestCrewAgentExecutorFlow: def test_finalize_failure(self, mock_dependencies): """Test finalize skips when given AgentAction instead of AgentFinish.""" - executor = CrewAgentExecutorFlow(**mock_dependencies) + executor = AgentExecutor(**mock_dependencies) executor.state.current_answer = AgentAction( thought="thinking", tool="search", tool_input="query", text="action text" ) @@ -181,7 +181,7 @@ class TestCrewAgentExecutorFlow: def test_format_prompt(self, mock_dependencies): """Test prompt formatting.""" - executor = CrewAgentExecutorFlow(**mock_dependencies) + executor = AgentExecutor(**mock_dependencies) inputs = {"input": "test input", "tool_names": "tool1, tool2", "tools": "desc"} result = executor._format_prompt("Prompt {input} {tool_names} {tools}", inputs) @@ -192,18 +192,18 @@ class TestCrewAgentExecutorFlow: def test_is_training_mode_false(self, mock_dependencies): """Test training mode detection when not in training.""" - executor = CrewAgentExecutorFlow(**mock_dependencies) + executor = AgentExecutor(**mock_dependencies) assert executor._is_training_mode() is False def test_is_training_mode_true(self, mock_dependencies): """Test training mode detection when in training.""" mock_dependencies["crew"]._train = True - executor = CrewAgentExecutorFlow(**mock_dependencies) + executor = AgentExecutor(**mock_dependencies) assert executor._is_training_mode() is True def test_append_message_to_state(self, mock_dependencies): """Test message appending to state.""" - executor = CrewAgentExecutorFlow(**mock_dependencies) + executor = AgentExecutor(**mock_dependencies) initial_count = len(executor.state.messages) executor._append_message_to_state("test message") @@ -216,7 +216,7 @@ class TestCrewAgentExecutorFlow: callback = Mock() mock_dependencies["step_callback"] = callback - executor = CrewAgentExecutorFlow(**mock_dependencies) + executor = AgentExecutor(**mock_dependencies) answer = AgentFinish(thought="thinking", output="test", text="final") executor._invoke_step_callback(answer) @@ -226,14 +226,14 @@ class TestCrewAgentExecutorFlow: def test_invoke_step_callback_none(self, mock_dependencies): """Test step callback when none provided.""" mock_dependencies["step_callback"] = None - executor = CrewAgentExecutorFlow(**mock_dependencies) + executor = AgentExecutor(**mock_dependencies) # Should not raise error executor._invoke_step_callback( AgentFinish(thought="thinking", output="test", text="final") ) - @patch("crewai.experimental.crew_agent_executor_flow.handle_output_parser_exception") + @patch("crewai.experimental.agent_executor.handle_output_parser_exception") def test_recover_from_parser_error( self, mock_handle_exception, mock_dependencies ): @@ -242,7 +242,7 @@ class TestCrewAgentExecutorFlow: mock_handle_exception.return_value = None - executor = CrewAgentExecutorFlow(**mock_dependencies) + executor = AgentExecutor(**mock_dependencies) executor._last_parser_error = OutputParserError("test error") initial_iterations = executor.state.iterations @@ -252,12 +252,12 @@ class TestCrewAgentExecutorFlow: assert executor.state.iterations == initial_iterations + 1 mock_handle_exception.assert_called_once() - @patch("crewai.experimental.crew_agent_executor_flow.handle_context_length") + @patch("crewai.experimental.agent_executor.handle_context_length") def test_recover_from_context_length( self, mock_handle_context, mock_dependencies ): """Test recovery from context length error.""" - executor = 
+        executor = AgentExecutor(**mock_dependencies)
         executor._last_context_error = Exception("context too long")
         initial_iterations = executor.state.iterations
@@ -270,16 +270,16 @@ class TestCrewAgentExecutorFlow:

     def test_use_stop_words_property(self, mock_dependencies):
         """Test use_stop_words property."""
         mock_dependencies["llm"].supports_stop_words.return_value = True
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
         assert executor.use_stop_words is True

         mock_dependencies["llm"].supports_stop_words.return_value = False
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
         assert executor.use_stop_words is False

     def test_compatibility_properties(self, mock_dependencies):
         """Test compatibility properties for mixin."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
         executor.state.messages = [{"role": "user", "content": "test"}]
         executor.state.iterations = 5
@@ -321,8 +321,8 @@ class TestFlowErrorHandling:
             "tools_handler": Mock(),
         }

-    @patch("crewai.experimental.crew_agent_executor_flow.get_llm_response")
-    @patch("crewai.experimental.crew_agent_executor_flow.enforce_rpm_limit")
+    @patch("crewai.experimental.agent_executor.get_llm_response")
+    @patch("crewai.experimental.agent_executor.enforce_rpm_limit")
     def test_call_llm_parser_error(
         self, mock_enforce_rpm, mock_get_llm, mock_dependencies
     ):
         mock_enforce_rpm.return_value = None
         mock_get_llm.side_effect = OutputParserError("parse failed")

-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
         result = executor.call_llm_and_parse()

         assert result == "parser_error"
         assert executor._last_parser_error is not None

-    @patch("crewai.experimental.crew_agent_executor_flow.get_llm_response")
-    @patch("crewai.experimental.crew_agent_executor_flow.enforce_rpm_limit")
-    @patch("crewai.experimental.crew_agent_executor_flow.is_context_length_exceeded")
+    @patch("crewai.experimental.agent_executor.get_llm_response")
+    @patch("crewai.experimental.agent_executor.enforce_rpm_limit")
+    @patch("crewai.experimental.agent_executor.is_context_length_exceeded")
     def test_call_llm_context_error(
         self,
         mock_is_context_exceeded,
         mock_enforce_rpm,
         mock_get_llm,
         mock_dependencies,
     ):
         mock_get_llm.side_effect = Exception("context length")
         mock_is_context_exceeded.return_value = True

-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
         result = executor.call_llm_and_parse()

         assert result == "context_error"
@@ -397,10 +397,10 @@ class TestFlowInvoke:
             "tools_handler": Mock(),
         }

-    @patch.object(CrewAgentExecutorFlow, "kickoff")
-    @patch.object(CrewAgentExecutorFlow, "_create_short_term_memory")
-    @patch.object(CrewAgentExecutorFlow, "_create_long_term_memory")
-    @patch.object(CrewAgentExecutorFlow, "_create_external_memory")
+    @patch.object(AgentExecutor, "kickoff")
+    @patch.object(AgentExecutor, "_create_short_term_memory")
+    @patch.object(AgentExecutor, "_create_long_term_memory")
+    @patch.object(AgentExecutor, "_create_external_memory")
     def test_invoke_success(
         self,
         mock_external_memory,
         mock_long_term_memory,
         mock_short_term_memory,
         mock_kickoff,
         mock_dependencies,
     ):
         """Test successful invoke without human feedback."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)

         # Mock kickoff to set the final answer in state
         def mock_kickoff_side_effect():
             executor.state.current_answer = AgentFinish(
                 thought="done", output="Final result", text="complete"
             )
             executor.state.ask_for_human_input = False

         mock_kickoff.side_effect = mock_kickoff_side_effect

         inputs = {"input": "test", "tool_names": "", "tools": ""}
         result = executor.invoke(inputs)

         assert result == {"output": "Final result"}
         mock_short_term_memory.assert_called_once()
         mock_long_term_memory.assert_called_once()
         mock_external_memory.assert_called_once()

-    @patch.object(CrewAgentExecutorFlow, "kickoff")
+    @patch.object(AgentExecutor, "kickoff")
     def test_invoke_failure_no_agent_finish(self, mock_kickoff, mock_dependencies):
         """Test invoke fails without AgentFinish."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
         executor.state.current_answer = AgentAction(
             thought="thinking", tool="test", tool_input="test", text="action text"
         )

         inputs = {"input": "test", "tool_names": "", "tools": ""}

         with pytest.raises(RuntimeError, match="without reaching a final answer"):
             executor.invoke(inputs)

-    @patch.object(CrewAgentExecutorFlow, "kickoff")
-    @patch.object(CrewAgentExecutorFlow, "_create_short_term_memory")
-    @patch.object(CrewAgentExecutorFlow, "_create_long_term_memory")
-    @patch.object(CrewAgentExecutorFlow, "_create_external_memory")
+    @patch.object(AgentExecutor, "kickoff")
+    @patch.object(AgentExecutor, "_create_short_term_memory")
+    @patch.object(AgentExecutor, "_create_long_term_memory")
+    @patch.object(AgentExecutor, "_create_external_memory")
     def test_invoke_with_system_prompt(
         self,
         mock_external_memory,
         mock_long_term_memory,
         mock_short_term_memory,
         mock_kickoff,
         mock_dependencies,
     ):
         mock_dependencies["prompt"] = {
             "system": "System: {input}",
             "user": "User: {input} {tool_names} {tools}",
         }
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)

         def mock_kickoff_side_effect():
             executor.state.current_answer = AgentFinish(