Compare commits

...

2 Commits

3 changed files with 245 additions and 1 deletion

View File

@@ -9,6 +9,7 @@ from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.cli.constants import ENV_VARS, LITELLM_PARAMS
from crewai.utilities import Logger
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
@@ -62,8 +63,12 @@ class Agent(BaseAgent):
        tools: Tools at the agent's disposal
        step_callback: Callback to be executed after each step of the agent execution.
        knowledge_sources: Knowledge sources for the agent.
        allow_feedback: Whether the agent can receive and process feedback during execution.
        allow_conflict: Whether the agent can handle conflicts with other agents during execution.
        allow_iteration: Whether the agent can iterate on its solutions based on feedback and validation.
    """

    _logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
    _times_executed: int = PrivateAttr(default=0)
    max_execution_time: Optional[int] = Field(
        default=None,
@@ -123,6 +128,18 @@ class Agent(BaseAgent):
        default="safe",
        description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
    )
    allow_feedback: bool = Field(
        default=False,
        description="Enable agent to receive and process feedback during execution.",
    )
    allow_conflict: bool = Field(
        default=False,
        description="Enable agent to handle conflicts with other agents during execution.",
    )
    allow_iteration: bool = Field(
        default=False,
        description="Enable agent to iterate on its solutions based on feedback and validation.",
    )
    embedder_config: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Embedder configuration for the agent.",
@@ -139,6 +156,19 @@ class Agent(BaseAgent):
    def post_init_setup(self):
        self._set_knowledge()
        self.agent_ops_agent_name = self.role

        # Validate the boolean parameters before acting on them
        for param in ["allow_feedback", "allow_conflict", "allow_iteration"]:
            if not isinstance(getattr(self, param), bool):
                raise ValueError(f"Parameter '{param}' must be a boolean value.")

        if self.allow_feedback:
            self._logger.log("info", "Feedback mode enabled for agent.", color="bold_green")
        if self.allow_conflict:
            self._logger.log("info", "Conflict handling enabled for agent.", color="bold_green")
        if self.allow_iteration:
            self._logger.log("info", "Iteration mode enabled for agent.", color="bold_green")

        unaccepted_attributes = [
            "AWS_ACCESS_KEY_ID",
            "AWS_SECRET_ACCESS_KEY",
@@ -400,6 +430,9 @@ class Agent(BaseAgent):
            step_callback=self.step_callback,
            function_calling_llm=self.function_calling_llm,
            respect_context_window=self.respect_context_window,
            allow_feedback=self.allow_feedback,
            allow_conflict=self.allow_conflict,
            allow_iteration=self.allow_iteration,
            request_within_rpm_limit=(
                self._rpm_controller.check_or_wait if self._rpm_controller else None
            ),
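
The Agent-side changes are self-contained: three new opt-in booleans, startup logging, validation, and pass-through to the executor. As a quick orientation for reviewers, a minimal usage sketch assuming this branch is installed; the role, goal, and backstory strings are illustrative placeholders, not values from the diff:

from crewai import Agent

# All three flags default to False, so existing Agents are unaffected.
agent = Agent(
    role="Researcher",               # placeholder
    goal="Summarize findings",       # placeholder
    backstory="A careful analyst.",  # placeholder
    allow_feedback=True,    # agent can receive and process feedback
    allow_conflict=True,    # agent can handle conflicts with other agents
    allow_iteration=True,   # agent can iterate on its solutions
)
# The flags are forwarded to the CrewAgentExecutor when it is created.
assert agent.allow_feedback and agent.allow_conflict and agent.allow_iteration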

View File

@@ -31,6 +31,34 @@ class ToolResult:
class CrewAgentExecutor(CrewAgentExecutorMixin):
    """CrewAgentExecutor class for managing agent execution.

    This class is responsible for executing agent tasks, handling tools,
    managing agent interactions, and processing the results.

    Parameters:
        llm: The language model to use for generating responses.
        task: The task to be executed.
        crew: The crew that the agent belongs to.
        agent: The agent to execute the task.
        prompt: The prompt to use for generating responses.
        max_iter: Maximum number of iterations for the agent execution.
        tools: The tools available to the agent.
        tools_names: The names of the tools available to the agent.
        stop_words: Words that signal the end of agent execution.
        tools_description: Description of the tools available to the agent.
        tools_handler: Handler for tool operations.
        step_callback: Callback function for each step of execution.
        original_tools: Original list of tools before processing.
        function_calling_llm: LLM used specifically for function calling.
        respect_context_window: Whether to respect the context window size.
        request_within_rpm_limit: Function to check if a request is within the RPM limit.
        callbacks: List of callback functions.
        allow_feedback: Whether to process feedback during execution.
        allow_conflict: Whether to handle conflicts with other agents during execution.
        allow_iteration: Whether to iterate on solutions based on feedback and validation.
    """

    _logger: Logger = Logger()

    def __init__(
def __init__(
@@ -52,6 +80,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        respect_context_window: bool = False,
        request_within_rpm_limit: Any = None,
        callbacks: List[Any] = [],
        allow_feedback: bool = False,
        allow_conflict: bool = False,
        allow_iteration: bool = False,
    ):
        self._i18n: I18N = I18N()
        self.llm = llm
@@ -73,6 +104,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        self.function_calling_llm = function_calling_llm
        self.respect_context_window = respect_context_window
        self.request_within_rpm_limit = request_within_rpm_limit
        self.allow_feedback = allow_feedback
        self.allow_conflict = allow_conflict
        self.allow_iteration = allow_iteration
        self.ask_for_human_input = False
        self.messages: List[Dict[str, str]] = []
        self.iterations = 0
@@ -487,3 +521,56 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
            self.ask_for_human_input = False
        return formatted_answer

    def process_feedback(self, feedback: str) -> bool:
        """Process feedback for the agent if feedback mode is enabled.

        Parameters:
            feedback (str): The feedback to process.

        Returns:
            bool: True if the feedback was processed successfully, False otherwise.
        """
        if not self.allow_feedback:
            self._logger.log("warning", "Feedback processing skipped (allow_feedback=False).", color="yellow")
            return False
        self._logger.log("info", f"Processing feedback: {feedback}", color="green")
        # Record the feedback in the conversation history so the LLM sees it
        self.messages.append(self._format_msg(f"Feedback: {feedback}"))
        return True

    def handle_conflict(self, other_agent: "CrewAgentExecutor") -> bool:
        """Handle a conflict with another agent if conflict handling is enabled.

        Parameters:
            other_agent (CrewAgentExecutor): The other agent involved in the conflict.

        Returns:
            bool: True if the conflict was handled successfully, False otherwise.
        """
        if not self.allow_conflict:
            self._logger.log("warning", "Conflict handling skipped (allow_conflict=False).", color="yellow")
            return False
        self._logger.log("info", f"Handling conflict with agent: {other_agent.agent.role}", color="green")
        return True

    def process_iteration(self, result: Any) -> bool:
        """Process an iteration on a result if iteration mode is enabled.

        Parameters:
            result (Any): The result to iterate on.

        Returns:
            bool: True if the iteration was processed successfully, False otherwise.
        """
        if not self.allow_iteration:
            self._logger.log("warning", "Iteration processing skipped (allow_iteration=False).", color="yellow")
            return False
        self._logger.log("info", "Processing iteration on result.", color="green")
        return True
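
All three helpers follow the same guard-then-log shape: return False with a warning when the flag is off, otherwise log and act. A short sketch of how calling code might use them, assuming executors built on this branch (the executor variables and strings are hypothetical):

# executor and other_executor are CrewAgentExecutor instances from this
# branch; the strings below are illustrative.
if executor.process_feedback("Tighten the summary to three bullets."):
    # Feedback was appended to executor.messages via _format_msg, so the
    # LLM sees it on the next invocation loop.
    pass

# Returns False (and logs a warning) unless allow_conflict=True.
resolved = executor.handle_conflict(other_executor)

# Currently logs only; the return value just signals that iteration
# mode is active for this executor.
accepted = executor.process_iteration("draft-1")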

View File

@@ -1625,3 +1625,127 @@ def test_agent_with_knowledge_sources():
    # Assert that the agent provides the correct information
    assert "red" in result.raw.lower()


def test_agent_with_feedback_conflict_iteration_params():
    """Test that the agent correctly handles the allow_feedback, allow_conflict, and allow_iteration parameters."""
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        allow_feedback=True,
        allow_conflict=True,
        allow_iteration=True,
    )
    assert agent.allow_feedback is True
    assert agent.allow_conflict is True
    assert agent.allow_iteration is True

    # Create another agent with default values
    default_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
    )
    assert default_agent.allow_feedback is False
    assert default_agent.allow_conflict is False
    assert default_agent.allow_iteration is False


def test_agent_feedback_processing():
    """Test that the agent correctly processes feedback when allow_feedback is enabled."""
    from unittest.mock import MagicMock, patch

    # Create a mock CrewAgentExecutor
    mock_executor = MagicMock()
    mock_executor.allow_feedback = True
    mock_executor.process_feedback.return_value = True

    # Mock the create_agent_executor method at the module level
    with patch("crewai.agent.Agent.create_agent_executor", return_value=mock_executor):
        # Create an agent with allow_feedback=True
        agent = Agent(
            role="test role",
            goal="test goal",
            backstory="test backstory",
            allow_feedback=True,
            llm=MagicMock(),  # Mock LLM to avoid API calls
        )
        executor = agent.create_agent_executor()
        assert executor.allow_feedback is True
        result = executor.process_feedback("Test feedback")
        assert result is True
        executor.process_feedback.assert_called_once_with("Test feedback")


def test_agent_conflict_handling():
    """Test that the agent correctly handles conflicts when allow_conflict is enabled."""
    from unittest.mock import MagicMock, patch

    mock_executor1 = MagicMock()
    mock_executor1.allow_conflict = True
    mock_executor1.handle_conflict.return_value = True
    mock_executor2 = MagicMock()
    mock_executor2.allow_conflict = True

    # side_effect returns a different mock per call, so each agent gets
    # its own executor (a plain return_value would hand back the same
    # mock twice and leave mock_executor2 unused)
    with patch("crewai.agent.Agent.create_agent_executor", side_effect=[mock_executor1, mock_executor2]):
        # Create agents with allow_conflict=True
        agent1 = Agent(
            role="role1",
            goal="goal1",
            backstory="backstory1",
            allow_conflict=True,
            llm=MagicMock(),  # Mock LLM to avoid API calls
        )
        agent2 = Agent(
            role="role2",
            goal="goal2",
            backstory="backstory2",
            allow_conflict=True,
            llm=MagicMock(),  # Mock LLM to avoid API calls
        )
        # Get the executors
        executor1 = agent1.create_agent_executor()
        executor2 = agent2.create_agent_executor()
        assert executor1.allow_conflict is True
        assert executor2.allow_conflict is True
        result = executor1.handle_conflict(executor2)
        assert result is True
        executor1.handle_conflict.assert_called_once_with(executor2)


def test_agent_iteration_processing():
    """Test that the agent correctly processes iterations when allow_iteration is enabled."""
    from unittest.mock import MagicMock, patch

    # Create a mock CrewAgentExecutor
    mock_executor = MagicMock()
    mock_executor.allow_iteration = True
    mock_executor.process_iteration.return_value = True

    # Mock the create_agent_executor method at the module level
    with patch("crewai.agent.Agent.create_agent_executor", return_value=mock_executor):
        # Create an agent with allow_iteration=True
        agent = Agent(
            role="test role",
            goal="test goal",
            backstory="test backstory",
            allow_iteration=True,
            llm=MagicMock(),  # Mock LLM to avoid API calls
        )
        executor = agent.create_agent_executor()
        assert executor.allow_iteration is True
        result = executor.process_iteration("Test result")
        assert result is True
        executor.process_iteration.assert_called_once_with("Test result")
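
The tests above exercise the enabled paths through mocks, so the early False returns in the new executor methods are never run. A possible companion test, sketched here and not part of this diff, binds the real methods to a spec'd mock so the guard logic itself executes:

def test_executor_helpers_return_false_when_disabled():
    """Sketch (not in this diff): the helpers refuse to act when their flags are off."""
    from unittest.mock import MagicMock

    from crewai.agents.crew_agent_executor import CrewAgentExecutor

    executor = MagicMock(spec=CrewAgentExecutor)
    executor.allow_feedback = False
    executor.allow_conflict = False
    executor.allow_iteration = False
    # Call the real (unbound) methods with the mock as self so the
    # allow_* guards run; _logger resolves to a mock attribute.
    assert CrewAgentExecutor.process_feedback(executor, "ignored") is False
    assert CrewAgentExecutor.handle_conflict(executor, executor) is False
    assert CrewAgentExecutor.process_iteration(executor, "ignored") is False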