Compare commits

5 Commits

Author     SHA1         Message                                                                          Date
Devin AI   06d922f452   Fix import sorting in test file using ruff                                       2025-02-26 09:22:50 +00:00
Devin AI   3e8635f1e7   Fix import sorting in test file again                                            2025-02-26 09:21:22 +00:00
Devin AI   e8a1169b85   Fix import sorting in test file                                                  2025-02-26 09:19:58 +00:00
Devin AI   64c1c4efa1   Fix linting error in test file                                                   2025-02-26 09:19:02 +00:00
Devin AI   46b5fc6538   Fix issue #2237: Properly handle LLM output with both Action and Final Answer   2025-02-26 09:17:22 +00:00

All five commits carry the trailer "Co-Authored-By: Joe Moura <joao@crewai.com>".
6 changed files with 108 additions and 34 deletions

View File

@@ -114,6 +114,7 @@ class Agent(BaseAgent):
    @model_validator(mode="after")
    def post_init_setup(self):
        self._set_knowledge()
        self.agent_ops_agent_name = self.role
        self.llm = create_llm(self.llm)
@@ -133,11 +134,8 @@ class Agent(BaseAgent):
        self.cache_handler = CacheHandler()
        self.set_cache_handler(self.cache_handler)
    def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
    def _set_knowledge(self):
        try:
            if self.embedder is None and crew_embedder:
                self.embedder = crew_embedder
            if self.knowledge_sources:
                full_pattern = re.compile(r"[^a-zA-Z0-9\-_\r\n]|(\.\.)")
                knowledge_agent_name = f"{re.sub(full_pattern, '_', self.role)}"

View File

@@ -351,6 +351,3 @@ class BaseAgent(ABC, BaseModel):
        if not self._rpm_controller:
            self._rpm_controller = rpm_controller
            self.create_agent_executor()
    def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
        pass

View File

@@ -232,7 +232,14 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                self._format_answer(answer)
            except OutputParserException as e:
                if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
                    answer = answer.split("Observation:")[0].strip()
                    # If both Action and Final Answer are present, prioritize the Action
                    # by removing the Final Answer part
                    if "Final Answer:" in answer:
                        parts = answer.split("Final Answer:")
                        answer = parts[0].strip()
                    # If that doesn't work, try splitting at Observation
                    elif "Observation:" in answer:
                        answer = answer.split("Observation:")[0].strip()

        return self._format_answer(answer)
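
The fallback above is plain string splitting before a re-parse; a minimal standalone sketch with an invented LLM output (no crewai imports):

# Invented LLM output containing both an Action and a Final Answer.
raw = (
    "Thought: I need to search for information.\n"
    "Action: search\n"
    "Action Input: what is the temperature in SF?\n"
    "Final Answer: The temperature is 100 degrees"
)

# Same precedence as the hunk: drop the Final Answer so the Action is parsed,
# otherwise fall back to cutting at the first Observation.
if "Final Answer:" in raw:
    cleaned = raw.split("Final Answer:")[0].strip()
elif "Observation:" in raw:
    cleaned = raw.split("Observation:")[0].strip()
else:
    cleaned = raw

print(cleaned)  # ends with "Action Input: what is the temperature in SF?"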

View File

@@ -600,7 +600,6 @@ class Crew(BaseModel):
            agent.i18n = i18n
            # type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
            agent.crew = self  # type: ignore[attr-defined]
            agent.set_knowledge(crew_embedder=self.embedder)
            # TODO: Create an AgentFunctionCalling protocol for future refactoring
            if not agent.function_calling_llm:  # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
                agent.function_calling_llm = self.function_calling_llm  # type: ignore # "BaseAgent" has no attribute "function_calling_llm"

View File

@@ -4,7 +4,7 @@ SQLite-based implementation of flow state persistence.
import json
import sqlite3
from datetime import datetime, timezone
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional, Union
@@ -34,7 +34,6 @@ class SQLiteFlowPersistence(FlowPersistence):
            ValueError: If db_path is invalid
        """
        from crewai.utilities.paths import db_storage_path
        # Get path from argument or default location
        path = db_path or str(Path(db_storage_path()) / "flow_states.db")
@@ -47,8 +46,7 @@ class SQLiteFlowPersistence(FlowPersistence):
    def init_db(self) -> None:
        """Create the necessary tables if they don't exist."""
        with sqlite3.connect(self.db_path) as conn:
            conn.execute(
                """
            conn.execute("""
            CREATE TABLE IF NOT EXISTS flow_states (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                flow_uuid TEXT NOT NULL,
@@ -56,15 +54,12 @@ class SQLiteFlowPersistence(FlowPersistence):
                timestamp DATETIME NOT NULL,
                state_json TEXT NOT NULL
            )
            """
            )
            """)
            # Add index for faster UUID lookups
            conn.execute(
                """
            conn.execute("""
            CREATE INDEX IF NOT EXISTS idx_flow_states_uuid
            ON flow_states(flow_uuid)
            """
            )
            """)
    def save_state(
        self,
@@ -90,22 +85,19 @@ class SQLiteFlowPersistence(FlowPersistence):
        )
        with sqlite3.connect(self.db_path) as conn:
            conn.execute(
                """
            conn.execute("""
            INSERT INTO flow_states (
                flow_uuid,
                method_name,
                timestamp,
                state_json
            ) VALUES (?, ?, ?, ?)
            """,
                (
                    flow_uuid,
                    method_name,
                    datetime.now(timezone.utc).isoformat(),
                    json.dumps(state_dict),
                ),
            )
            """, (
                flow_uuid,
                method_name,
                datetime.utcnow().isoformat(),
                json.dumps(state_dict),
            ))
    def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
        """Load the most recent state for a given flow UUID.
@@ -117,16 +109,13 @@ class SQLiteFlowPersistence(FlowPersistence):
            The most recent state as a dictionary, or None if no state exists
        """
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.execute(
                """
            cursor = conn.execute("""
            SELECT state_json
            FROM flow_states
            WHERE flow_uuid = ?
            ORDER BY id DESC
            LIMIT 1
            """,
                (flow_uuid,),
            )
            """, (flow_uuid,))
            row = cursor.fetchone()
            if row:
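
Apart from string formatting, the substantive difference between the two sides of this file is the timestamp call. A short sketch of both calls as they appear in the hunks; the value written to the timestamp column gains a "+00:00" offset on the timezone-aware side:

from datetime import datetime, timezone

naive = datetime.utcnow()            # naive UTC: tzinfo is None
aware = datetime.now(timezone.utc)   # timezone-aware UTC

print(naive.isoformat())  # e.g. 2025-02-26T09:17:22.000000
print(aware.isoformat())  # e.g. 2025-02-26T09:17:22.000000+00:00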

View File

@@ -0,0 +1,84 @@
from unittest.mock import MagicMock

import pytest

from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.agents.parser import (
    FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
    AgentAction,
    AgentFinish,
    OutputParserException,
)


def test_process_llm_response_with_action_and_final_answer():
    """Test that _process_llm_response correctly handles outputs with both Action and Final Answer."""
    # Create a mock LLM
    mock_llm = MagicMock()
    mock_llm.supports_stop_words.return_value = False

    # Create a mock agent
    mock_agent = MagicMock()

    # Create a CrewAgentExecutor instance
    executor = CrewAgentExecutor(
        llm=mock_llm,
        task=MagicMock(),
        crew=MagicMock(),
        agent=mock_agent,
        prompt={},
        max_iter=5,
        tools=[],
        tools_names="",
        stop_words=[],
        tools_description="",
        tools_handler=MagicMock(),
    )

    # Test case 1: Output with both Action and Final Answer, with Final Answer after Action
    output_with_both = """
    Thought: I need to search for information and then provide an answer.
    Action: search
    Action Input: what is the temperature in SF?
    Final Answer: The temperature is 100 degrees
    """

    # Mock the _format_answer method to first raise an exception and then return a valid result
    format_answer_mock = MagicMock()
    format_answer_mock.side_effect = [
        OutputParserException(FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE),
        AgentAction(thought="", tool="search", tool_input="what is the temperature in SF?", text=""),
    ]
    executor._format_answer = format_answer_mock

    # Process the response
    result = executor._process_llm_response(output_with_both)

    # Verify that the result is an AgentAction
    assert isinstance(result, AgentAction)
    assert result.tool == "search"
    assert result.tool_input == "what is the temperature in SF?"

    # Test case 2: Output with both Action and Final Answer, with Observation in between
    output_with_observation = """
    Thought: I need to search for information.
    Action: search
    Action Input: what is the temperature in SF?
    Observation: The temperature in SF is 100 degrees.
    Final Answer: The temperature is 100 degrees
    """

    # Reset the mock
    format_answer_mock.reset_mock()
    format_answer_mock.side_effect = [
        OutputParserException(FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE),
        AgentAction(thought="", tool="search", tool_input="what is the temperature in SF?", text=""),
    ]

    # Process the response
    result = executor._process_llm_response(output_with_observation)

    # Verify that the result is an AgentAction
    assert isinstance(result, AgentAction)
    assert result.tool == "search"
    assert result.tool_input == "what is the temperature in SF?"
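
The retry path exercised by this test hinges on MagicMock's side_effect taking a list: the first call raises the listed exception, the next call returns the listed value. A minimal sketch of that behavior in isolation, with invented values:

from unittest.mock import MagicMock

mock = MagicMock(side_effect=[ValueError("parse failed"), "parsed action"])

try:
    mock("raw output")               # first call raises ValueError
except ValueError:
    result = mock("cleaned output")  # second call returns "parsed action"

print(result)  # parsed action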