Compare commits

...

5 Commits

Author SHA1 Message Date
Devin AI
06d922f452 Fix import sorting in test file using ruff
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-26 09:22:50 +00:00
Devin AI
3e8635f1e7 Fix import sorting in test file again
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-26 09:21:22 +00:00
Devin AI
e8a1169b85 Fix import sorting in test file
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-26 09:19:58 +00:00
Devin AI
64c1c4efa1 Fix linting error in test file
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-26 09:19:02 +00:00
Devin AI
46b5fc6538 Fix issue #2237: Properly handle LLM output with both Action and Final Answer
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-26 09:17:22 +00:00
2 changed files with 92 additions and 1 deletion

View File

@@ -232,7 +232,14 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._format_answer(answer)
except OutputParserException as e:
if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
answer = answer.split("Observation:")[0].strip()
# If both Action and Final Answer are present, prioritize the Action
# by removing the Final Answer part
if "Final Answer:" in answer:
parts = answer.split("Final Answer:")
answer = parts[0].strip()
# If that doesn't work, try splitting at Observation
elif "Observation:" in answer:
answer = answer.split("Observation:")[0].strip()
return self._format_answer(answer)

View File

@@ -0,0 +1,84 @@
from unittest.mock import MagicMock
import pytest
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.agents.parser import (
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
AgentAction,
AgentFinish,
OutputParserException,
)
def test_process_llm_response_with_action_and_final_answer():
    """Test that _process_llm_response correctly handles outputs with both Action and Final Answer.

    Regression test for issue #2237. Covers two malformed-LLM-output shapes:

    1. An Action block immediately followed by a Final Answer.
    2. An Action block, then an Observation line, then a Final Answer.

    In both cases the executor must strip the trailing Final Answer /
    Observation text, re-parse, and return the AgentAction rather than
    raising on the mixed output.
    """
    # LLM that reports no stop-word support, so the executor must truncate
    # the mixed output itself instead of relying on the LLM stopping early.
    mock_llm = MagicMock()
    mock_llm.supports_stop_words.return_value = False

    executor = CrewAgentExecutor(
        llm=mock_llm,
        task=MagicMock(),
        crew=MagicMock(),
        agent=MagicMock(),
        prompt={},
        max_iter=5,
        tools=[],
        tools_names="",
        stop_words=[],
        tools_description="",
        tools_handler=MagicMock(),
    )

    mixed_outputs = [
        # Case 1: Final Answer directly after the Action block.
        """
        Thought: I need to search for information and then provide an answer.
        Action: search
        Action Input: what is the temperature in SF?
        Final Answer: The temperature is 100 degrees
        """,
        # Case 2: an Observation between the Action and the Final Answer.
        """
        Thought: I need to search for information.
        Action: search
        Action Input: what is the temperature in SF?
        Observation: The temperature in SF is 100 degrees.
        Final Answer: The temperature is 100 degrees
        """,
    ]

    for mixed_output in mixed_outputs:
        # First parse attempt fails with the "both present" error; the
        # second (on the truncated text) succeeds with an AgentAction.
        format_answer_mock = MagicMock(
            side_effect=[
                OutputParserException(FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE),
                AgentAction(
                    thought="",
                    tool="search",
                    tool_input="what is the temperature in SF?",
                    text="",
                ),
            ]
        )
        executor._format_answer = format_answer_mock

        result = executor._process_llm_response(mixed_output)

        # The Action must win over the Final Answer.
        assert isinstance(result, AgentAction)
        assert result.tool == "search"
        assert result.tool_input == "what is the temperature in SF?"
        # The executor must have retried parsing exactly once after the
        # initial OutputParserException.
        assert format_answer_mock.call_count == 2