Author: lorenzejay
Date:   2025-11-18 07:38:11 -08:00
Parent: aa2ef71e35
Commit: b3c1780507
2 changed files with 3 additions and 12 deletions

File 1 of 2

@@ -21,9 +21,7 @@ from typing_extensions import Self
 from crewai.a2a.config import A2AConfig
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.cache.cache_handler import CacheHandler
-from crewai.agents.crew_agent_executor import CrewAgentExecutor
-# from crewai.agents.crew_agent_executor_flow import CrewAgentExecutorFlow
+from crewai.agents.crew_agent_executor_flow import CrewAgentExecutorFlow
 from crewai.events.event_bus import crewai_event_bus
 from crewai.events.types.knowledge_events import (
     KnowledgeQueryCompletedEvent,
@@ -99,7 +97,7 @@ class Agent(BaseAgent):
     The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.
     Attributes:
-        agent_executor: An instance of the CrewAgentExecutor class.
+        agent_executor: An instance of the CrewAgentExecutor or CrewAgentExecutorFlow class.
         role: The role of the agent.
         goal: The objective of the agent.
         backstory: The backstory of the agent.
@@ -184,10 +182,6 @@ class Agent(BaseAgent):
         default=None,
         description="Maximum number of reasoning attempts before executing the task. If None, will try until ready.",
     )
-    # use_flow_executor: bool = Field(
-    #     default=False,
-    #     description="Use Flow-based executor instead of traditional while-loop executor.",
-    # )
     embedder: EmbedderConfig | None = Field(
         default=None,
         description="Embedder configuration for the agent.",
@@ -654,7 +648,7 @@ class Agent(BaseAgent):
                 rpm_limit_fn=rpm_limit_fn,
             )
         else:
-            self.agent_executor = CrewAgentExecutor(
+            self.agent_executor = CrewAgentExecutorFlow(
                 llm=self.llm,
                 task=task,  # type: ignore[arg-type]
                 agent=self,
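
Because the executor swap happens inside the Agent class itself, calling code built on crewai's public API is unchanged. A brief, hedged usage sketch (the role, goal, and task text below are illustrative assumptions, not taken from this commit):

from crewai import Agent, Crew, Task

# Illustrative agent/task definitions; only the fact that Agent now constructs
# a CrewAgentExecutorFlow internally comes from this commit.
researcher = Agent(
    role="Release note writer",
    goal="Summarize executor behavior for a task run",
    backstory="An engineer documenting crewai internals.",
)

note_task = Task(
    description="Describe what happens during one executor iteration.",
    expected_output="A short plain-text summary.",
    agent=researcher,
)

# kickoff() drives the agent; the flow-based executor runs under the hood.
result = Crew(agents=[researcher], tasks=[note_task]).kickoff()
print(result)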

File 2 of 2

@@ -289,10 +289,8 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
             color="blue",
         )
         try:
-            # RPM enforcement (line 227)
             enforce_rpm_limit(self.request_within_rpm_limit)
-            # LLM call with hooks (lines 229-238)
             # Note: Hooks are already integrated in get_llm_response utility
             answer = get_llm_response(
                 llm=self.llm,
@@ -304,7 +302,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
                 response_model=self.response_model,
                 executor_context=self,
             )
-            print(f"answer for iteration: {self.state.iterations} is {answer}")
             # Parse response (line 239)
             formatted_answer = process_llm_response(answer, self.use_stop_words)
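
The surrounding iteration follows a simple order: enforce the RPM limit, call the LLM (hooks already run inside get_llm_response), then parse the raw text into a ReAct step. A rough, hedged sketch of that control flow with simplified stand-in helpers (these are not the real crewai utilities; only the ordering mirrors the diff context):

from typing import Callable


def run_react_iteration(
    call_llm: Callable[[], str],
    within_rpm_limit: Callable[[], bool] | None,
    parse_response: Callable[[str], object],
) -> object:
    # 1. Give the rate limiter a chance to deny the request before the call.
    if within_rpm_limit is not None and not within_rpm_limit():
        raise RuntimeError("request denied by RPM limiter")
    # 2. Ask the LLM for the next ReAct step (hooks would run inside call_llm).
    answer = call_llm()
    # 3. Turn the raw text into an AgentAction/AgentFinish-style object.
    return parse_response(answer)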