Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 08:08:32 +00:00
Implement LLM-based adaptive reasoning with function calling
Co-Authored-By: Joe Moura <joao@crewai.com>
@@ -108,13 +108,12 @@ This creates a predictable pattern of reflection during task execution, which is
 
 ### Adaptive Reasoning
 
-The adaptive reasoning feature uses contextual triggers to determine when reasoning should occur:
+The adaptive reasoning feature uses LLM function calling to determine when reasoning should occur:
 
-1. **Multiple tools used**: When the agent has used multiple different tools in recent steps, indicating a change in approach
-2. **Long execution**: When the task is taking longer than expected (iterations > max_iter/2)
-3. **Error detection**: When recent messages contain error indicators like "error", "exception", "failed", etc.
+1. **LLM-based decision**: The agent's LLM evaluates the current execution context (task description, expected output, steps taken so far) to decide if reasoning is needed
+2. **Error detection fallback**: When recent messages contain error indicators like "error", "exception", "failed", etc., reasoning is automatically triggered
 
-This creates a more dynamic reasoning pattern that adapts to the task's needs, allowing the agent to be more responsive to changing conditions.
+This creates an intelligent reasoning pattern where the agent uses its own judgment to determine when strategic reassessment would be most beneficial, while maintaining automatic error recovery.
 
 ### Mid-execution Reasoning Process
 
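To make the new decision flow concrete, here is a minimal, self-contained sketch of the logic the updated docs describe. The names used (should_pause_for_reasoning, ask_llm_should_reason, recent_output) are illustrative only and do not appear in this commit; the actual implementation is the executor change in the next hunk.

def should_pause_for_reasoning(recent_output: str, ask_llm_should_reason) -> bool:
    # Error detection fallback: obvious failure signals always trigger reasoning.
    error_markers = ("error", "exception", "failed")
    if any(marker in recent_output.lower() for marker in error_markers):
        return True
    # Otherwise defer to the agent's own LLM, which weighs the task description,
    # expected output, and the steps taken so far.
    return ask_llm_should_reason()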
@@ -483,26 +483,34 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
 
     def _should_adaptive_reason(self) -> bool:
         """
-        Determine if adaptive reasoning should be triggered based on execution context.
+        Determine if adaptive reasoning should be triggered using LLM decision.
+        Fallback to error detection if LLM decision fails.
 
         Returns:
             bool: True if adaptive reasoning should be triggered, False otherwise.
         """
-        return (
-            self._has_used_multiple_tools_recently() or
-            self._is_taking_too_long() or
-            self._has_recent_errors()
-        )
-
-    def _has_used_multiple_tools_recently(self) -> bool:
-        """Check if multiple different tools were used in recent steps."""
-        if len(self.tools_used) < 3:
-            return False
-        return len(set(list(self.tools_used)[-3:])) > 1
-
-    def _is_taking_too_long(self) -> bool:
-        """Check if iterations exceed expected duration."""
-        return self.iterations > self.max_iter // 2
+        if self._has_recent_errors():
+            return True
+
+        try:
+            from crewai.utilities.reasoning_handler import AgentReasoning
+            from crewai.agent import Agent
+
+            current_progress = self._summarize_current_progress()
+
+            reasoning_handler = AgentReasoning(task=self.task, agent=cast(Agent, self.agent))
+
+            return reasoning_handler.should_adaptive_reason_llm(
+                current_steps=self.iterations,
+                tools_used=list(self.tools_used),
+                current_progress=current_progress
+            )
+        except Exception as e:
+            self._printer.print(
+                content=f"Error during adaptive reasoning decision: {str(e)}. Using fallback error detection.",
+                color="yellow",
+            )
+            return False
 
     def _has_recent_errors(self) -> bool:
         """Check for error indicators in recent messages."""
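The body of _has_recent_errors lies outside this hunk, so it is not shown here. Going only by the documentation above (error indicators such as "error", "exception", "failed" in recent messages), a check of roughly this shape would fit; this is a sketch under that assumption, not the committed implementation, and the self.messages attribute holding role/content dicts is likewise an assumption about executor state.

    def _has_recent_errors(self) -> bool:
        """Check for error indicators in recent messages."""
        # Sketch only: scan the last few executor messages for common error markers.
        error_markers = ("error", "exception", "failed")
        recent_messages = self.messages[-3:]  # assumed executor state
        return any(
            marker in str(message.get("content", "")).lower()
            for message in recent_messages
            for marker in error_markers
        )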
@@ -56,6 +56,7 @@
     "initial_plan": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are creating a strategic plan for a task that requires your expertise and unique perspective.",
     "refine_plan": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are refining a strategic plan for a task that requires your expertise and unique perspective.",
     "create_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou have been assigned the following task:\n{description}\n\nExpected output:\n{expected_output}\n\nAvailable tools: {tools}\n\nBefore executing this task, create a detailed plan that leverages your expertise as {role} and outlines:\n1. Your understanding of the task from your professional perspective\n2. The key steps you'll take to complete it, drawing on your background and skills\n3. How you'll approach any challenges that might arise, considering your expertise\n4. How you'll strategically use the available tools based on your experience, exactly what tools to use and how to use them\n5. The expected outcome and how it aligns with your goal\n\nAfter creating your plan, assess whether you feel ready to execute the task or if you could do better.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan because [specific reason].\"",
-    "refine_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou created the following plan for this task:\n{current_plan}\n\nHowever, you indicated that you're not ready to execute the task yet.\n\nPlease refine your plan further, drawing on your expertise as {role} to address any gaps or uncertainties. As you refine your plan, be specific about which available tools you will use, how you will use them, and why they are the best choices for each step. Clearly outline your tool usage strategy as part of your improved plan.\n\nAfter refining your plan, assess whether you feel ready to execute the task.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan further because [specific reason].\""
+    "refine_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou created the following plan for this task:\n{current_plan}\n\nHowever, you indicated that you're not ready to execute the task yet.\n\nPlease refine your plan further, drawing on your expertise as {role} to address any gaps or uncertainties. As you refine your plan, be specific about which available tools you will use, how you will use them, and why they are the best choices for each step. Clearly outline your tool usage strategy as part of your improved plan.\n\nAfter refining your plan, assess whether you feel ready to execute the task.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan further because [specific reason].\"",
+    "adaptive_reasoning_decision": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are currently executing a task and need to decide whether to pause and reassess your plan based on the current context."
   }
 }
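The new adaptive_reasoning_decision entry is consumed later in this commit via self.i18n.retrieve("reasoning", "adaptive_reasoning_decision"). A quick illustration of how the template renders; the I18N import path is an assumption, and the role/goal/backstory values are sample data, not part of the diff:

from crewai.utilities.i18n import I18N  # assumed import path

system_prompt = I18N().retrieve("reasoning", "adaptive_reasoning_decision").format(
    role="Research Analyst",                       # sample values for illustration
    goal="Deliver a concise market summary",
    backstory="Ten years of market research experience.",
)
# -> "You are Research Analyst, a professional with the following background:
#     Ten years of market research experience. ... As Research Analyst, you are
#     currently executing a task and need to decide whether to pause and reassess
#     your plan based on the current context."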
@@ -556,3 +556,129 @@ Adjust your strategy if needed or confirm your current approach is still optimal
 Provide a detailed updated plan for completing the task.
 End with "READY: I am ready to continue executing the task." if you're confident in your plan.
 """
+
+    def should_adaptive_reason_llm(
+        self,
+        current_steps: int,
+        tools_used: list,
+        current_progress: str
+    ) -> bool:
+        """
+        Use LLM function calling to determine if adaptive reasoning should be triggered.
+
+        Args:
+            current_steps: Number of steps executed so far
+            tools_used: List of tools that have been used
+            current_progress: Summary of progress made so far
+
+        Returns:
+            bool: True if reasoning should be triggered, False otherwise.
+        """
+        try:
+            decision_prompt = self.__create_adaptive_reasoning_decision_prompt(
+                current_steps, tools_used, current_progress
+            )
+
+            if self.llm.supports_function_calling():
+                should_reason = self.__call_adaptive_reasoning_function(decision_prompt)
+            else:
+                should_reason = self.__call_adaptive_reasoning_text(decision_prompt)
+
+            return should_reason
+        except Exception as e:
+            self.logger.warning(f"Error during adaptive reasoning decision: {str(e)}. Defaulting to no reasoning.")
+            return False
+
+    def __call_adaptive_reasoning_function(self, prompt: str) -> bool:
+        """Call LLM with function calling for adaptive reasoning decision."""
+        function_schema = {
+            "type": "function",
+            "function": {
+                "name": "decide_reasoning_need",
+                "description": "Decide whether reasoning is needed based on current task execution context",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "should_reason": {
+                            "type": "boolean",
+                            "description": "Whether reasoning/re-planning is needed at this point in task execution."
+                        },
+                        "reasoning": {
+                            "type": "string",
+                            "description": "Brief explanation of why reasoning is or isn't needed."
+                        }
+                    },
+                    "required": ["should_reason", "reasoning"]
+                }
+            }
+        }
+
+        def _decide_reasoning_need(should_reason: bool, reasoning: str):
+            """Return the reasoning decision result in JSON string form."""
+            return json.dumps({"should_reason": should_reason, "reasoning": reasoning})
+
+        system_prompt = self.i18n.retrieve("reasoning", "adaptive_reasoning_decision").format(
+            role=self.agent.role,
+            goal=self.agent.goal,
+            backstory=self.__get_agent_backstory()
+        )
+
+        response = self.llm.call(
+            [
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": prompt}
+            ],
+            tools=[function_schema],
+            available_functions={"decide_reasoning_need": _decide_reasoning_need},
+        )
+
+        try:
+            result = json.loads(response)
+            return result.get("should_reason", False)
+        except (json.JSONDecodeError, KeyError):
+            return False
+
+    def __call_adaptive_reasoning_text(self, prompt: str) -> bool:
+        """Fallback text-based adaptive reasoning decision."""
+        system_prompt = self.i18n.retrieve("reasoning", "adaptive_reasoning_decision").format(
+            role=self.agent.role,
+            goal=self.agent.goal,
+            backstory=self.__get_agent_backstory()
+        )
+
+        response = self.llm.call([
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": prompt + "\n\nRespond with 'YES' if reasoning is needed, 'NO' if not."}
+        ])
+
+        return "YES" in str(response).upper()
+
+    def __create_adaptive_reasoning_decision_prompt(
+        self,
+        current_steps: int,
+        tools_used: list,
+        current_progress: str
+    ) -> str:
+        """Create prompt for adaptive reasoning decision."""
+        tools_used_str = ", ".join(tools_used) if tools_used else "No tools used yet"
+
+        return f"""You are currently executing a task. Based on the information below, decide whether you should pause to reassess and update your plan.
+
+TASK DESCRIPTION:
+{self.task.description}
+
+EXPECTED OUTPUT:
+{self.task.expected_output}
+
+CURRENT EXECUTION CONTEXT:
+- Steps completed: {current_steps}
+- Tools used: {tools_used_str}
+- Progress summary: {current_progress}
+
+Consider whether the current approach is optimal or if a strategic pause to reassess would be beneficial. You should reason when:
+- You might be approaching the task inefficiently
+- The context suggests a different strategy might be better
+- You're uncertain about the next steps
+- The progress suggests you need to reconsider your approach
+
+Decide whether reasoning/re-planning is needed at this point."""
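For context, a usage sketch of the new entry point, mirroring how the executor hunk earlier in this commit calls it; the literal argument values are illustrative:

from crewai.utilities.reasoning_handler import AgentReasoning

handler = AgentReasoning(task=task, agent=agent)  # task and agent as in the executor hunk
should_reason = handler.should_adaptive_reason_llm(
    current_steps=4,
    tools_used=["search_tool", "scrape_tool"],
    current_progress="Collected three sources; the summary section is still missing.",
)
if should_reason:
    # Pause here and run the mid-execution reasoning / plan-refinement flow.
    ...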
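One note on why the function-calling path ends with json.loads(response): the code assumes that when the model calls decide_reasoning_need, LLM.call runs the matching entry in available_functions and hands back its return value, so response is the JSON string produced by _decide_reasoning_need. Roughly:

import json

# What _decide_reasoning_need returns when the model opts to reason:
response = json.dumps({"should_reason": True, "reasoning": "Recent results contradict the plan."})

result = json.loads(response)
print(result.get("should_reason", False))  # True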