fix: properly handle agent max iterations

fixes #3847
Greyson LaLonde
2025-11-07 13:54:11 -05:00
committed by GitHub
parent 1ed307b58c
commit 19c5b9a35e
4 changed files with 560 additions and 16 deletions

@@ -214,6 +214,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                         llm=self.llm,
                         callbacks=self.callbacks,
                     )
+                    break
                 enforce_rpm_limit(self.request_within_rpm_limit)
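
The added break is the substance of the fix: once handle_max_iterations_exceeded forces a final answer, the executor must not start another iteration. A minimal sketch of the surrounding loop shape, assuming a while-loop executor; the loop condition, has_reached_max_iterations, and any arguments ahead of llm are assumptions taken from context, not shown in this excerpt:

# Sketch (not from this commit); the while condition and
# has_reached_max_iterations are assumptions for illustration.
while not isinstance(formatted_answer, AgentFinish):
    if has_reached_max_iterations(self.iterations, self.max_iter):
        formatted_answer = handle_max_iterations_exceeded(
            formatted_answer,
            messages=self.messages,
            llm=self.llm,
            callbacks=self.callbacks,
        )
        break  # the forced answer is final; never iterate again
    enforce_rpm_limit(self.request_within_rpm_limit)
    ...  # normal LLM call / tool-use path continues below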
@@ -226,7 +227,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     from_agent=self.agent,
                     response_model=self.response_model,
                 )
-                formatted_answer = process_llm_response(answer, self.use_stop_words)
+                formatted_answer = process_llm_response(answer, self.use_stop_words)  # type: ignore[assignment]
 
                 if isinstance(formatted_answer, AgentAction):
                     # Extract agent fingerprint if available
@@ -258,11 +259,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                         formatted_answer, tool_result
                     )
 
-                self._invoke_step_callback(formatted_answer)
-                self._append_message(formatted_answer.text)
+                self._invoke_step_callback(formatted_answer)  # type: ignore[arg-type]
+                self._append_message(formatted_answer.text)  # type: ignore[union-attr,attr-defined]
 
-            except OutputParserError as e:  # noqa: PERF203
-                formatted_answer = handle_output_parser_exception(
+            except OutputParserError as e:
+                formatted_answer = handle_output_parser_exception(  # type: ignore[assignment]
                     e=e,
                     messages=self.messages,
                     iterations=self.iterations,

@@ -127,7 +127,7 @@ def handle_max_iterations_exceeded(
     messages: list[LLMMessage],
     llm: LLM | BaseLLM,
     callbacks: list[TokenCalcHandler],
-) -> AgentAction | AgentFinish:
+) -> AgentFinish:
     """Handles the case when the maximum number of iterations is exceeded. Performs one more LLM call to get the final answer.
 
     Args:
@@ -139,7 +139,7 @@ def handle_max_iterations_exceeded(
         callbacks: List of callbacks for the LLM call.
 
     Returns:
-        The final formatted answer after exceeding max iterations.
+        AgentFinish with the final answer after exceeding max iterations.
     """
     printer.print(
         content="Maximum iterations reached. Requesting final answer.",
@@ -157,7 +157,7 @@ def handle_max_iterations_exceeded(
 
     # Perform one more LLM call to get the final answer
     answer = llm.call(
-        messages,  # type: ignore[arg-type]
+        messages,
         callbacks=callbacks,
     )
@@ -168,8 +168,16 @@ def handle_max_iterations_exceeded(
         )
         raise ValueError("Invalid response from LLM call - None or empty.")
 
-    # Return the formatted answer, regardless of its type
-    return format_answer(answer=answer)
+    formatted = format_answer(answer=answer)
+
+    # If format_answer returned an AgentAction, convert it to AgentFinish
+    if isinstance(formatted, AgentFinish):
+        return formatted
+    return AgentFinish(
+        thought=formatted.thought,
+        output=formatted.text,
+        text=formatted.text,
+    )
 
 
 def format_message_for_llm(
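
The conversion above is what makes the unconditional break at the call site safe: handle_max_iterations_exceeded now always returns AgentFinish, even when the LLM's last reply still parses as a tool call. A sketch of that narrowing contract, using only the fields visible in this diff (thought and text on AgentAction; thought, output, and text on AgentFinish); ensure_finish is a hypothetical name:

# Hypothetical helper illustrating the narrowing guarantee.
def ensure_finish(formatted: AgentAction | AgentFinish) -> AgentFinish:
    if isinstance(formatted, AgentFinish):
        return formatted
    # One more tool call was requested, but no tool will run after the
    # loop exits, so the action's raw text becomes the final output.
    return AgentFinish(
        thought=formatted.thought,
        output=formatted.text,
        text=formatted.text,
    )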
@@ -249,10 +257,10 @@ def get_llm_response(
     """
     try:
         answer = llm.call(
-            messages,  # type: ignore[arg-type]
+            messages,
             callbacks=callbacks,
             from_task=from_task,
-            from_agent=from_agent,
+            from_agent=from_agent,  # type: ignore[arg-type]
             response_model=response_model,
         )
     except Exception as e:
@@ -294,8 +302,8 @@ def handle_agent_action_core(
     formatted_answer: AgentAction,
     tool_result: ToolResult,
     messages: list[LLMMessage] | None = None,
-    step_callback: Callable | None = None,
-    show_logs: Callable | None = None,
+    step_callback: Callable | None = None,  # type: ignore[type-arg]
+    show_logs: Callable | None = None,  # type: ignore[type-arg]
 ) -> AgentAction | AgentFinish:
     """Core logic for handling agent actions and tool results.
@@ -481,7 +489,7 @@ def summarize_messages(
         ),
     ]
     summary = llm.call(
-        messages,  # type: ignore[arg-type]
+        messages,
         callbacks=callbacks,
    )
     summarized_contents.append({"content": str(summary)})
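
Most of this commit's 560 added lines fall outside this excerpt, likely tests. A hedged sketch of the regression the fix makes assertable; the fixtures and any parameters ahead of messages in handle_max_iterations_exceeded's signature are assumptions:

# Hypothetical check (not from this commit): the helper must always
# narrow to AgentFinish, which is what makes the executor's break safe.
result = handle_max_iterations_exceeded(
    formatted_answer,
    messages=messages,
    llm=llm,
    callbacks=[],
)
assert isinstance(result, AgentFinish)
assert result.output  # empty LLM replies raise ValueError upstream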