This commit is contained in:
João Moura
2025-05-28 20:49:05 -07:00
parent 224ba1fb69
commit e0cd41e9f9
7 changed files with 307 additions and 114 deletions

View File

@@ -139,7 +139,7 @@ class Agent(BaseAgent):
default=None,
description="Interval of steps after which the agent should reason again during execution. If None, reasoning only happens before execution.",
)
@field_validator('reasoning_interval')
@classmethod
def validate_reasoning_interval(cls, v):
@@ -181,6 +181,9 @@ class Agent(BaseAgent):
def post_init_setup(self):
self.agent_ops_agent_name = self.role
if getattr(self, "adaptive_reasoning", False) and not getattr(self, "reasoning", False):
self.reasoning = True
self.llm = create_llm(self.llm)
if self.function_calling_llm and not isinstance(
self.function_calling_llm, BaseLLM
@@ -259,10 +262,10 @@ class Agent(BaseAgent):
if self.reasoning:
try:
from crewai.utilities.reasoning_handler import AgentReasoning, AgentReasoningOutput
reasoning_handler = AgentReasoning(task=task, agent=self)
reasoning_output: AgentReasoningOutput = reasoning_handler.handle_agent_reasoning()
# Add the reasoning plan to the task description
task.description += f"\n\nReasoning Plan:\n{reasoning_output.plan.plan}"
except Exception as e:
@@ -270,9 +273,9 @@ class Agent(BaseAgent):
self._logger.log("error", f"Error during reasoning process: {str(e)}")
else:
print(f"Error during reasoning process: {str(e)}")
self._inject_date_to_task(task)
if self.tools_handler:
self.tools_handler.last_used_tool = {} # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling")
@@ -640,10 +643,10 @@ class Agent(BaseAgent):
try:
valid_format_codes = ['%Y', '%m', '%d', '%H', '%M', '%S', '%B', '%b', '%A', '%a']
is_valid = any(code in self.date_format for code in valid_format_codes)
if not is_valid:
raise ValueError(f"Invalid date format: {self.date_format}")
current_date: str = datetime.now().strftime(self.date_format)
task.description += f"\n\nCurrent Date: {current_date}"
except Exception as e:

View File

@@ -195,7 +195,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._handle_mid_execution_reasoning()
else:
self.steps_since_reasoning += 1
self._invoke_step_callback(formatted_answer)
self._append_message(formatted_answer.text, role="assistant")
@@ -243,7 +243,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
if hasattr(formatted_answer, 'tool') and formatted_answer.tool:
if formatted_answer.tool not in self.tools_used:
self.tools_used.append(formatted_answer.tool)
# Special case for add_image_tool
add_image_tool = self._i18n.tools("add_image")
if (
@@ -462,48 +462,63 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
),
color="red",
)
def _should_trigger_reasoning(self) -> bool:
"""
Determine if mid-execution reasoning should be triggered.
Returns:
bool: True if reasoning should be triggered, False otherwise.
"""
if not hasattr(self.agent, "reasoning") or not self.agent.reasoning:
return False
if hasattr(self.agent, "reasoning_interval") and self.agent.reasoning_interval is not None:
return self.steps_since_reasoning >= self.agent.reasoning_interval
if hasattr(self.agent, "adaptive_reasoning") and self.agent.adaptive_reasoning:
return self._should_adaptive_reason()
return False
def _should_adaptive_reason(self) -> bool:
"""
Determine if adaptive reasoning should be triggered using LLM decision.
Fallback to error detection if LLM decision fails.
Returns:
bool: True if adaptive reasoning should be triggered, False otherwise.
"""
if self._has_recent_errors():
try:
from crewai.utilities.events.reasoning_events import AgentAdaptiveReasoningDecisionEvent
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
crewai_event_bus.emit(
self.agent,
AgentAdaptiveReasoningDecisionEvent(
agent_role=self.agent.role,
task_id=str(self.task.id),
should_reason=True,
reasoning="Recent error indicators detected in previous messages.",
),
)
except Exception:
pass
return True
try:
from crewai.utilities.reasoning_handler import AgentReasoning
from crewai.agent import Agent
current_progress = self._summarize_current_progress()
reasoning_handler = AgentReasoning(task=self.task, agent=cast(Agent, self.agent))
return reasoning_handler.should_adaptive_reason_llm(
current_steps=self.iterations,
tools_used=list(self.tools_used),
current_progress=current_progress
current_progress=current_progress,
)
except Exception as e:
self._printer.print(
@@ -511,75 +526,77 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
color="yellow",
)
return False
def _has_recent_errors(self) -> bool:
"""Check for error indicators in recent messages."""
error_indicators = ["error", "exception", "failed", "unable to", "couldn't"]
recent_messages = self.messages[-3:] if len(self.messages) >= 3 else self.messages
for message in recent_messages:
content = message.get("content", "").lower()
if any(indicator in content for indicator in error_indicators):
return True
return False
def _handle_mid_execution_reasoning(self) -> None:
"""
Handle mid-execution reasoning by calling the reasoning handler.
"""
if not hasattr(self.agent, "reasoning") or not self.agent.reasoning:
return
try:
from crewai.utilities.reasoning_handler import AgentReasoning
current_progress = self._summarize_current_progress()
from crewai.agent import Agent
reasoning_handler = AgentReasoning(task=self.task, agent=cast(Agent, self.agent))
reasoning_output = reasoning_handler.handle_mid_execution_reasoning(
current_steps=self.iterations,
tools_used=list(self.tools_used),
current_progress=current_progress,
iteration_messages=self.messages
)
self.messages.append({
"role": "system",
"content": self._i18n.retrieve("reasoning", "mid_execution_reasoning_update").format(plan=reasoning_output.plan.plan)
})
self._append_message(
self._i18n.retrieve("reasoning", "mid_execution_reasoning_update").format(
plan=reasoning_output.plan.plan
),
role="assistant",
)
self.steps_since_reasoning = 0
except Exception as e:
self._printer.print(
content=f"Error during mid-execution reasoning: {str(e)}",
color="red",
)
def _summarize_current_progress(self) -> str:
"""
Create a summary of the current execution progress.
Returns:
str: A summary of the current progress.
"""
recent_messages = self.messages[-5:] if len(self.messages) >= 5 else self.messages
summary = f"After {self.iterations} steps, "
if self.tools_used:
unique_tools = set(self.tools_used)
summary += f"I've used {len(self.tools_used)} tools ({', '.join(unique_tools)}). "
else:
summary += "I haven't used any tools yet. "
if recent_messages:
last_message = recent_messages[-1].get("content", "")
if len(last_message) > 100:
last_message = last_message[:100] + "..."
summary += f"Most recent action: {last_message}"
return summary

View File

@@ -25,7 +25,7 @@
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
"feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary.",
"lite_agent_system_prompt_with_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
"lite_agent_system_prompt_with_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the complete final answer to the original input question\n```",
"lite_agent_system_prompt_without_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"lite_agent_response_format": "\nIMPORTANT: Your final answer MUST contain all the information requested in the following format: {response_format}\n\nIMPORTANT: Ensure the final output does not include any code block markers like ```json or ```python.",
"knowledge_search_query": "The original query is: {task_prompt}.",
@@ -55,10 +55,10 @@
"reasoning": {
"initial_plan": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are creating a strategic plan for a task that requires your expertise and unique perspective.",
"refine_plan": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are refining a strategic plan for a task that requires your expertise and unique perspective.",
"create_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou have been assigned the following task:\n{description}\n\nExpected output:\n{expected_output}\n\nAvailable tools: {tools}\n\nBefore executing this task, create a detailed plan that leverages your expertise as {role} and outlines:\n1. Your understanding of the task from your professional perspective\n2. The key steps you'll take to complete it, drawing on your background and skills\n3. How you'll approach any challenges that might arise, considering your expertise\n4. How you'll strategically use the available tools based on your experience, exactly what tools to use and how to use them\n5. The expected outcome and how it aligns with your goal\n\nAfter creating your plan, assess whether you feel ready to execute the task or if you could do better.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan because [specific reason].\"",
"refine_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou created the following plan for this task:\n{current_plan}\n\nHowever, you indicated that you're not ready to execute the task yet.\n\nPlease refine your plan further, drawing on your expertise as {role} to address any gaps or uncertainties. As you refine your plan, be specific about which available tools you will use, how you will use them, and why they are the best choices for each step. Clearly outline your tool usage strategy as part of your improved plan.\n\nAfter refining your plan, assess whether you feel ready to execute the task.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan further because [specific reason].\"",
"create_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou have been assigned the following task:\n{description}\n\nExpected output:\n{expected_output}\n\nAvailable tools: {tools}\n\nBefore executing this task, create a detailed plan that leverages your expertise as {role} and outlines:\n1. Your understanding of the task from your professional perspective\n2. The key steps you'll take to complete it, drawing on your background and skills\n3. How you'll approach any challenges that might arise, considering your expertise\n4. How you'll strategically use the available tools based on your experience, exactly what tools to use and how to use them\n5. The expected outcome and how it aligns with your goal\n\nRemember: your ultimate objective is to produce the most COMPLETE Final Answer that fully meets the **Expected output** criteria.\n\nAfter creating your plan, assess whether you feel ready to execute the task or if you could do better.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan because [specific reason].\"",
"refine_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou created the following plan for this task:\n{current_plan}\n\nHowever, you indicated that you're not ready to execute the task yet.\n\nPlease refine your plan further, drawing on your expertise as {role} to address any gaps or uncertainties. As you refine your plan, be specific about which available tools you will use, how you will use them, and why they are the best choices for each step. Clearly outline your tool usage strategy as part of your improved plan.\n\nMake sure your refined strategy directly guides you toward producing the most COMPLETE Final Answer that fully satisfies the **Expected output**.\n\nAfter refining your plan, assess whether you feel ready to execute the task.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan further because [specific reason].\"",
"adaptive_reasoning_decision": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are currently executing a task and need to decide whether to pause and reassess your plan based on the current context.",
"mid_execution_reasoning": "You are currently executing a task and need to reassess your plan based on progress so far.\n\nTASK DESCRIPTION:\n{description}\n\nEXPECTED OUTPUT:\n{expected_output}\n\nCURRENT PROGRESS:\nSteps completed: {current_steps}\nTools used: {tools_used}\nProgress summary: {current_progress}\n\nRECENT CONVERSATION:\n{recent_messages}\n\nBased on the current progress and context, please reassess your plan for completing this task.\nConsider what has been accomplished, what challenges you've encountered, and what steps remain.\nAdjust your strategy if needed or confirm your current approach is still optimal.\n\nProvide a detailed updated plan for completing the task.\nEnd with \"READY: I am ready to continue executing the task.\" if you're confident in your plan.",
"mid_execution_reasoning": "You are currently executing a task and need to reassess your plan based on progress so far.\n\nTASK DESCRIPTION:\n{description}\n\nEXPECTED OUTPUT:\n{expected_output}\n\nCURRENT PROGRESS:\nSteps completed: {current_steps}\nTools used: {tools_used}\nProgress summary: {current_progress}\n\nRECENT CONVERSATION:\n{recent_messages}\n\nYour reassessment MUST focus on steering the remaining work toward a FINAL ANSWER that is as complete as possible and perfectly matches the **Expected output**.\n\nBased on the current progress and context, please reassess your plan for completing this task.\nConsider what has been accomplished, what challenges you've encountered, and what steps remain.\nAdjust your strategy if needed or confirm your current approach is still optimal.\n\nProvide a detailed updated plan for completing the task.\nEnd with \"READY: I am ready to continue executing the task.\" if you're confident in your plan.",
"mid_execution_plan": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are reassessing your plan during task execution based on the progress made so far.",
"mid_execution_reasoning_update": "I've reassessed my approach based on progress so far. Updated plan:\n\n{plan}",
"adaptive_reasoning_context": "\n\nTASK DESCRIPTION:\n{description}\n\nEXPECTED OUTPUT:\n{expected_output}\n\nCURRENT EXECUTION CONTEXT:\n- Steps completed: {current_steps}\n- Tools used: {tools_used}\n- Progress summary: {current_progress}\n\nConsider whether the current approach is optimal or if a strategic pause to reassess would be beneficial. You should reason when:\n- You might be approaching the task inefficiently\n- The context suggests a different strategy might be better\n- You're uncertain about the next steps\n- The progress suggests you need to reconsider your approach\n\nDecide whether reasoning/re-planning is needed at this point."

View File

@@ -61,6 +61,8 @@ from .reasoning_events import (
AgentReasoningStartedEvent,
AgentReasoningCompletedEvent,
AgentReasoningFailedEvent,
AgentMidExecutionReasoningStartedEvent,
AgentMidExecutionReasoningCompletedEvent,
)
@@ -421,8 +423,6 @@ class EventListener(BaseEventListener):
self.formatter.current_crew_tree,
)
# ----------- REASONING EVENTS -----------
@crewai_event_bus.on(AgentReasoningStartedEvent)
def on_agent_reasoning_started(source, event: AgentReasoningStartedEvent):
self.formatter.handle_reasoning_started(
@@ -446,5 +446,37 @@ class EventListener(BaseEventListener):
self.formatter.current_crew_tree,
)
@crewai_event_bus.on(AgentMidExecutionReasoningStartedEvent)
def on_mid_execution_reasoning_started(source, event: AgentMidExecutionReasoningStartedEvent):
self.formatter.handle_reasoning_started(
self.formatter.current_agent_branch,
event.attempt if hasattr(event, "attempt") else 1,
self.formatter.current_crew_tree,
current_step=event.current_step,
reasoning_trigger=event.reasoning_trigger,
)
@crewai_event_bus.on(AgentMidExecutionReasoningCompletedEvent)
def on_mid_execution_reasoning_completed(source, event: AgentMidExecutionReasoningCompletedEvent):
self.formatter.handle_reasoning_completed(
event.updated_plan,
True,
self.formatter.current_crew_tree,
duration_seconds=event.duration_seconds,
current_step=event.current_step,
reasoning_trigger=event.reasoning_trigger,
)
from crewai.utilities.events.reasoning_events import AgentAdaptiveReasoningDecisionEvent
@crewai_event_bus.on(AgentAdaptiveReasoningDecisionEvent)
def on_adaptive_reasoning_decision(source, event: AgentAdaptiveReasoningDecisionEvent):
self.formatter.handle_adaptive_reasoning_decision(
self.formatter.current_agent_branch,
event.should_reason,
event.reasoning,
self.formatter.current_crew_tree,
)
event_listener = EventListener()

View File

@@ -34,7 +34,7 @@ class AgentReasoningFailedEvent(BaseEvent):
class AgentMidExecutionReasoningStartedEvent(BaseEvent):
"""Event emitted when an agent starts mid-execution reasoning."""
type: str = "agent_mid_execution_reasoning_started"
agent_role: str
task_id: str
@@ -44,7 +44,7 @@ class AgentMidExecutionReasoningStartedEvent(BaseEvent):
class AgentMidExecutionReasoningCompletedEvent(BaseEvent):
"""Event emitted when an agent completes mid-execution reasoning."""
type: str = "agent_mid_execution_reasoning_completed"
agent_role: str
task_id: str
@@ -52,3 +52,14 @@ class AgentMidExecutionReasoningCompletedEvent(BaseEvent):
updated_plan: str
reasoning_trigger: str
duration_seconds: float = 0.0 # Time taken for reasoning in seconds
class AgentAdaptiveReasoningDecisionEvent(BaseEvent):
    """Event emitted after the agent decides whether to trigger adaptive reasoning."""

    # Discriminator string used by listeners to identify this event type.
    type: str = "agent_adaptive_reasoning_decision"
    # Role of the agent that made the decision.
    agent_role: str
    # Identifier of the task being executed when the decision was made.
    task_id: str
    should_reason: bool  # Whether the agent decided to reason
    reasoning: str  # Brief explanation / rationale from the LLM
    reasoning_trigger: str = "adaptive"  # Always adaptive for this event

View File

@@ -17,6 +17,10 @@ class ConsoleFormatter:
current_lite_agent_branch: Optional[Tree] = None
tool_usage_counts: Dict[str, int] = {}
current_reasoning_branch: Optional[Tree] = None # Track reasoning status
current_adaptive_decision_branch: Optional[Tree] = None # Track last adaptive decision branch
# Spinner support
_spinner_frames = ["", "", "", "", "", "", "", "", "", ""]
_spinner_index: int = 0
def __init__(self, verbose: bool = False):
self.console = Console(width=None)
@@ -218,6 +222,13 @@ class ConsoleFormatter:
# Set the current_task_branch attribute directly
self.current_task_branch = task_branch
# When a new task starts, clear pointers to previous agent, reasoning,
# and tool branches so that any upcoming Reasoning / Tool logs attach
# to the correct task.
self.current_agent_branch = None
self.current_reasoning_branch = None
self.current_tool_branch = None
return task_branch
def update_task_status(
@@ -265,6 +276,14 @@ class ConsoleFormatter:
)
self.print_panel(content, panel_title, style)
# Clear task-scoped pointers after the task is finished so subsequent
# events don't mistakenly attach to the old task branch.
if status in {"completed", "failed"}:
self.current_task_branch = None
self.current_agent_branch = None
self.current_tool_branch = None
self.current_reasoning_branch = None
def create_agent_branch(
self, task_branch: Optional[Tree], agent_role: str, crew_tree: Optional[Tree]
) -> Optional[Tree]:
@@ -463,9 +482,10 @@ class ConsoleFormatter:
self.current_tool_branch = tool_branch
# Update label with current count
spinner = self._next_spinner()
self.update_tree_label(
tool_branch,
"🔧",
f"🔧 {spinner}",
f"Using {tool_name} ({self.tool_usage_counts[tool_name]})",
"yellow",
)
@@ -494,7 +514,7 @@ class ConsoleFormatter:
# Update the existing tool node's label
self.update_tree_label(
tool_branch,
"🔧",
"🔧",
f"Used {tool_name} ({self.tool_usage_counts[tool_name]})",
"green",
)
@@ -567,7 +587,8 @@ class ConsoleFormatter:
# Only add thinking status if we don't have a current tool branch
if self.current_tool_branch is None:
tool_branch = branch_to_use.add("")
self.update_tree_label(tool_branch, "🧠", "Thinking...", "blue")
spinner = self._next_spinner()
self.update_tree_label(tool_branch, f"🧠 {spinner}", "Thinking...", "blue")
self.current_tool_branch = tool_branch
self.print(tree_to_use)
self.print()
@@ -1067,12 +1088,16 @@ class ConsoleFormatter:
if not self.verbose:
return None
# Prefer LiteAgent > Agent > Task branch as the parent for reasoning
branch_to_use = (
self.current_lite_agent_branch
or agent_branch
or self.current_task_branch
)
# Prefer to nest under the latest adaptive decision branch when this is a
# mid-execution reasoning cycle so the tree indents nicely.
if current_step is not None and self.current_adaptive_decision_branch is not None:
branch_to_use = self.current_adaptive_decision_branch
else:
branch_to_use = (
self.current_lite_agent_branch
or agent_branch
or self.current_task_branch
)
# We always want to render the full crew tree when possible so the
# Live view updates coherently. Fallbacks: crew tree → branch itself.
@@ -1091,12 +1116,16 @@ class ConsoleFormatter:
# Build label text depending on attempt and whether it's mid-execution
if current_step is not None:
trigger_text = f" ({reasoning_trigger})" if reasoning_trigger else ""
status_text = f"Mid-Execution Reasoning at step {current_step}{trigger_text}"
status_text = f"Mid-Execution Reasoning{trigger_text}"
else:
status_text = (
f"Reasoning (Attempt {attempt})" if attempt > 1 else "Reasoning..."
)
self.update_tree_label(reasoning_branch, "🧠", status_text, "blue")
# ⠋ is the first frame of a braille spinner; it visually hints at progress
# even without true animation.
spinner = self._next_spinner()
self.update_tree_label(reasoning_branch, f"🧠 {spinner}", status_text, "yellow")
self.print(tree_to_use)
self.print()
@@ -1125,15 +1154,29 @@ class ConsoleFormatter:
)
style = "green" if ready else "yellow"
duration_text = f" ({duration_seconds:.2f}s)" if duration_seconds > 0 else ""
if current_step is not None:
trigger_text = f" ({reasoning_trigger})" if reasoning_trigger else ""
status_text = f"Mid-Execution Reasoning Completed at step {current_step}{trigger_text}{duration_text}"
else:
status_text = f"Reasoning Completed{duration_text}" if ready else f"Reasoning Completed (Not Ready){duration_text}"
# Build duration part separately for cleaner formatting
duration_part = f"{duration_seconds:.2f}s" if duration_seconds > 0 else ""
if reasoning_branch is not None:
if current_step is not None:
# Build label manually to style duration differently and omit trigger info.
if reasoning_branch is not None:
label = Text()
label.append("", style=f"{style} bold")
label.append("Mid-Execution Reasoning Completed", style=style)
if duration_part:
label.append(f" ({duration_part})", style="cyan")
reasoning_branch.label = label
status_text = None # Already set label manually
else:
status_text = (
f"Reasoning Completed ({duration_part})" if duration_part else "Reasoning Completed"
) if ready else (
f"Reasoning Completed (Not Ready • {duration_part})" if duration_part else "Reasoning Completed (Not Ready)"
)
# If we didn't build a custom label (non-mid-execution case), use helper
if status_text and reasoning_branch is not None:
self.update_tree_label(reasoning_branch, "", status_text, style)
if tree_to_use is not None:
@@ -1141,11 +1184,14 @@ class ConsoleFormatter:
# Show plan in a panel (trim very long plans)
if plan:
# Derive duration text for panel title
duration_text = f" ({duration_part})" if duration_part else ""
if current_step is not None:
title = f"🧠 Mid-Execution Reasoning Plan (Step {current_step}){duration_text}"
title = f"🧠 Mid-Execution Reasoning Plan{duration_text}"
else:
title = f"🧠 Reasoning Plan{duration_text}"
plan_panel = Panel(
Text(plan, style="white"),
title=title,
@@ -1159,6 +1205,10 @@ class ConsoleFormatter:
# Clear stored branch after completion
self.current_reasoning_branch = None
# After reasoning has finished, also clear the adaptive decision branch so
# that unrelated future nodes are not nested under it.
self.current_adaptive_decision_branch = None
def handle_reasoning_failed(
self,
error: str,
@@ -1193,3 +1243,70 @@ class ConsoleFormatter:
# Clear stored branch after failure
self.current_reasoning_branch = None
# ----------- ADAPTIVE REASONING DECISION EVENTS -----------
def handle_adaptive_reasoning_decision(
    self,
    agent_branch: Optional[Tree],
    should_reason: bool,
    reasoning: str,
    crew_tree: Optional[Tree],
) -> None:
    """Render the decision on whether to trigger adaptive reasoning.

    Args:
        agent_branch: Tree branch of the currently executing agent, if any.
        should_reason: The decision the agent reached.
        reasoning: Rationale text from the LLM (may be empty).
        crew_tree: Root crew tree used as a rendering fallback.
    """
    # Console rendering is opt-in; do nothing unless verbose mode is on.
    if not self.verbose:
        return

    # Prefer LiteAgent > Agent > Task as parent
    branch_to_use = (
        self.current_lite_agent_branch
        or agent_branch
        or self.current_task_branch
    )
    tree_to_use = self.current_crew_tree or crew_tree or branch_to_use
    # Without both a parent branch and a renderable tree there is nothing to draw.
    if branch_to_use is None or tree_to_use is None:
        return

    decision_branch = branch_to_use.add("")
    decision_text = "YES" if should_reason else "NO"
    # Green highlights an affirmative decision; yellow a declined one.
    style = "green" if should_reason else "yellow"
    self.update_tree_label(
        decision_branch,
        "🤔",
        f"Adaptive Reasoning Decision: {decision_text}",
        style,
    )

    # Print tree first (live update)
    self.print(tree_to_use)

    # Also show explanation if available
    if reasoning:
        # Cap the rationale at 500 characters to keep the panel readable.
        truncated_reasoning = reasoning[:500] + "..." if len(reasoning) > 500 else reasoning
        panel = Panel(
            Text(truncated_reasoning, style="white"),
            title="🤔 Adaptive Reasoning Rationale",
            border_style=style,
            padding=(1, 2),
        )
        self.print(panel)
        # NOTE(review): the diff rendering stripped indentation — this trailing
        # blank-line print is assumed to belong to the rationale branch; confirm
        # against the original file.
        self.print()

    # Store the decision branch so that subsequent mid-execution reasoning nodes
    # can be rendered as children of this decision (for better indentation).
    self.current_adaptive_decision_branch = decision_branch
# ------------------------------------------------------------------
# Spinner helpers
# ------------------------------------------------------------------
def _next_spinner(self) -> str:
"""Return next spinner frame."""
frame = self._spinner_frames[self._spinner_index]
self._spinner_index = (self._spinner_index + 1) % len(self._spinner_frames)
return frame

View File

@@ -57,7 +57,7 @@ class AgentReasoning:
"""
import time
start_time = time.time()
# Emit a reasoning started event (attempt 1)
try:
crewai_event_bus.emit(
@@ -74,7 +74,7 @@ class AgentReasoning:
try:
output = self.__handle_agent_reasoning()
duration_seconds = time.time() - start_time
# Emit reasoning completed event
@@ -391,14 +391,14 @@ class AgentReasoning:
"The _handle_agent_reasoning method is deprecated. Use handle_agent_reasoning instead."
)
return self.handle_agent_reasoning()
def _emit_reasoning_event(self, event_class, **kwargs):
"""Centralized method for emitting reasoning events."""
try:
reasoning_trigger = "interval"
if hasattr(self.agent, 'adaptive_reasoning') and self.agent.adaptive_reasoning:
reasoning_trigger = "adaptive"
crewai_event_bus.emit(
self.agent,
event_class(
@@ -411,9 +411,9 @@ class AgentReasoning:
except Exception:
# Ignore event bus errors to avoid breaking execution
pass
def handle_mid_execution_reasoning(
self,
self,
current_steps: int,
tools_used: list,
current_progress: str,
@@ -421,21 +421,21 @@ class AgentReasoning:
) -> AgentReasoningOutput:
"""
Handle reasoning during task execution with context about current progress.
Args:
current_steps: Number of steps executed so far
tools_used: List of tools that have been used
current_progress: Summary of progress made so far
iteration_messages: Recent conversation messages
Returns:
AgentReasoningOutput: Updated reasoning plan based on current context
"""
import time
start_time = time.time()
from crewai.utilities.events.reasoning_events import AgentMidExecutionReasoningStartedEvent
self._emit_reasoning_event(
AgentMidExecutionReasoningStartedEvent,
current_step=current_steps
@@ -445,12 +445,12 @@ class AgentReasoning:
output = self.__handle_mid_execution_reasoning(
current_steps, tools_used, current_progress, iteration_messages
)
duration_seconds = time.time() - start_time
# Emit completed event
from crewai.utilities.events.reasoning_events import AgentMidExecutionReasoningCompletedEvent
self._emit_reasoning_event(
AgentMidExecutionReasoningCompletedEvent,
current_step=current_steps,
@@ -462,7 +462,7 @@ class AgentReasoning:
except Exception as e:
# Emit failed event
from crewai.utilities.events.reasoning_events import AgentReasoningFailedEvent
self._emit_reasoning_event(
AgentReasoningFailedEvent,
error=str(e),
@@ -470,9 +470,9 @@ class AgentReasoning:
)
raise
def __handle_mid_execution_reasoning(
self,
self,
current_steps: int,
tools_used: list,
current_progress: str,
@@ -480,13 +480,13 @@ class AgentReasoning:
) -> AgentReasoningOutput:
"""
Private method that handles the mid-execution reasoning process.
Args:
current_steps: Number of steps executed so far
tools_used: List of tools that have been used
current_progress: Summary of progress made so far
iteration_messages: Recent conversation messages
Returns:
AgentReasoningOutput: The output of the mid-execution reasoning process.
"""
@@ -515,9 +515,9 @@ class AgentReasoning:
reasoning_plan = ReasoningPlan(plan=plan, ready=ready)
return AgentReasoningOutput(plan=reasoning_plan)
def __create_mid_execution_prompt(
self,
self,
current_steps: int,
tools_used: list,
current_progress: str,
@@ -525,18 +525,18 @@ class AgentReasoning:
) -> str:
"""
Creates a prompt for the agent to reason during task execution.
Args:
current_steps: Number of steps executed so far
tools_used: List of tools that have been used
current_progress: Summary of progress made so far
iteration_messages: Recent conversation messages
Returns:
str: The mid-execution reasoning prompt.
"""
tools_used_str = ", ".join(tools_used) if tools_used else "No tools used yet"
recent_messages = ""
if iteration_messages:
recent_msgs = iteration_messages[-6:] if len(iteration_messages) > 6 else iteration_messages
@@ -545,7 +545,7 @@ class AgentReasoning:
content = msg.get("content", "")
if content:
recent_messages += f"{role.upper()}: {content[:200]}...\n\n"
return self.i18n.retrieve("reasoning", "mid_execution_reasoning").format(
description=self.task.description,
expected_output=self.task.expected_output,
@@ -554,21 +554,21 @@ class AgentReasoning:
current_progress=current_progress,
recent_messages=recent_messages
)
def should_adaptive_reason_llm(
self,
self,
current_steps: int,
tools_used: list,
current_progress: str
) -> bool:
"""
Use LLM function calling to determine if adaptive reasoning should be triggered.
Args:
current_steps: Number of steps executed so far
tools_used: List of tools that have been used
current_progress: Summary of progress made so far
Returns:
bool: True if reasoning should be triggered, False otherwise.
"""
@@ -578,16 +578,29 @@ class AgentReasoning:
)
if self.llm.supports_function_calling():
should_reason = self.__call_adaptive_reasoning_function(decision_prompt)
should_reason, reasoning_expl = self.__call_adaptive_reasoning_function(decision_prompt)
else:
should_reason = self.__call_adaptive_reasoning_text(decision_prompt)
should_reason, reasoning_expl = self.__call_adaptive_reasoning_text(decision_prompt)
# Emit an event so the UI/console can display the decision
try:
from crewai.utilities.events.reasoning_events import AgentAdaptiveReasoningDecisionEvent
self._emit_reasoning_event(
AgentAdaptiveReasoningDecisionEvent,
should_reason=should_reason,
reasoning=reasoning_expl,
)
except Exception:
# Ignore event bus errors to avoid breaking execution
pass
return should_reason
except Exception as e:
self.logger.warning(f"Error during adaptive reasoning decision: {str(e)}. Defaulting to no reasoning.")
return False
def __call_adaptive_reasoning_function(self, prompt: str) -> bool:
def __call_adaptive_reasoning_function(self, prompt: str) -> tuple[bool, str]:
"""Call LLM with function calling for adaptive reasoning decision."""
function_schema = {
"type": "function",
@@ -602,7 +615,7 @@ class AgentReasoning:
"description": "Whether reasoning/re-planning is needed at this point in task execution."
},
"reasoning": {
"type": "string",
"type": "string",
"description": "Brief explanation of why reasoning is or isn't needed."
}
},
@@ -632,11 +645,11 @@ class AgentReasoning:
try:
result = json.loads(response)
return result.get("should_reason", False)
return result.get("should_reason", False), result.get("reasoning", "No explanation provided")
except (json.JSONDecodeError, KeyError):
return False
return False, "No explanation provided"
def __call_adaptive_reasoning_text(self, prompt: str) -> bool:
def __call_adaptive_reasoning_text(self, prompt: str) -> tuple[bool, str]:
"""Fallback text-based adaptive reasoning decision."""
system_prompt = self.i18n.retrieve("reasoning", "adaptive_reasoning_decision").format(
role=self.agent.role,
@@ -649,24 +662,24 @@ class AgentReasoning:
{"role": "user", "content": prompt + "\n\nRespond with 'YES' if reasoning is needed, 'NO' if not."}
])
return "YES" in str(response).upper()
return "YES" in str(response).upper(), "No explanation provided"
def __create_adaptive_reasoning_decision_prompt(
self,
self,
current_steps: int,
tools_used: list,
current_progress: str
) -> str:
"""Create prompt for adaptive reasoning decision."""
tools_used_str = ", ".join(tools_used) if tools_used else "No tools used yet"
# Use the prompt from i18n and format it with the current context
base_prompt = self.i18n.retrieve("reasoning", "adaptive_reasoning_decision").format(
role=self.agent.role,
goal=self.agent.goal,
backstory=self.__get_agent_backstory()
)
context_prompt = self.i18n.retrieve("reasoning", "adaptive_reasoning_context").format(
description=self.task.description,
expected_output=self.task.expected_output,
@@ -674,7 +687,7 @@ class AgentReasoning:
tools_used=tools_used_str,
current_progress=current_progress
)
prompt = base_prompt + context_prompt
return prompt