Compare commits


2 Commits

8 changed files with 247 additions and 90 deletions

View File

@@ -9,6 +9,7 @@ from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.cli.constants import ENV_VARS, LITELLM_PARAMS
from crewai.utilities import Logger
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
@@ -23,7 +24,6 @@ from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_F
from crewai.utilities.converter import generate_model_description
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.utilities.typing import AgentConfig
agentops = None
@@ -63,8 +63,12 @@ class Agent(BaseAgent):
tools: Tools at the agent's disposal.
step_callback: Callback to be executed after each step of the agent execution.
knowledge_sources: Knowledge sources for the agent.
allow_feedback: Whether the agent can receive and process feedback during execution.
allow_conflict: Whether the agent can handle conflicts with other agents during execution.
allow_iteration: Whether the agent can iterate on its solutions based on feedback and validation.
"""
_logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
_times_executed: int = PrivateAttr(default=0)
max_execution_time: Optional[int] = Field(
default=None,
@@ -89,7 +93,6 @@ class Agent(BaseAgent):
function_calling_llm: Optional[Any] = Field(
description="Language model that will run the agent.", default=None
)
config: Optional[Union[Dict[str, Any], AgentConfig]] = Field(default=None)
system_template: Optional[str] = Field(
default=None, description="System format for the agent."
)
@@ -125,6 +128,18 @@ class Agent(BaseAgent):
default="safe",
description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
)
allow_feedback: bool = Field(
default=False,
description="Enable agent to receive and process feedback during execution.",
)
allow_conflict: bool = Field(
default=False,
description="Enable agent to handle conflicts with other agents during execution.",
)
allow_iteration: bool = Field(
default=False,
description="Enable agent to iterate on its solutions based on feedback and validation.",
)
embedder_config: Optional[Dict[str, Any]] = Field(
default=None,
description="Embedder configuration for the agent.",
@@ -141,6 +156,19 @@ class Agent(BaseAgent):
def post_init_setup(self):
self._set_knowledge()
self.agent_ops_agent_name = self.role
if self.allow_feedback:
self._logger.log("info", "Feedback mode enabled for agent.", color="bold_green")
if self.allow_conflict:
self._logger.log("info", "Conflict handling enabled for agent.", color="bold_green")
if self.allow_iteration:
self._logger.log("info", "Iteration mode enabled for agent.", color="bold_green")
# Validate boolean parameters
for param in ['allow_feedback', 'allow_conflict', 'allow_iteration']:
if not isinstance(getattr(self, param), bool):
raise ValueError(f"Parameter '{param}' must be a boolean value.")
unaccepted_attributes = [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
@@ -402,6 +430,9 @@ class Agent(BaseAgent):
step_callback=self.step_callback,
function_calling_llm=self.function_calling_llm,
respect_context_window=self.respect_context_window,
allow_feedback=self.allow_feedback,
allow_conflict=self.allow_conflict,
allow_iteration=self.allow_iteration,
request_within_rpm_limit=(
self._rpm_controller.check_or_wait if self._rpm_controller else None
),
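For orientation, here is a minimal usage sketch of the three new flags. Only the field names and their False defaults come from the diff above; the other constructor values are illustrative.

from crewai import Agent

# Sketch only: role/goal/backstory values are placeholders.
reviewer = Agent(
    role="Reviewer",
    goal="Review and refine generated answers",
    backstory="An experienced editor",
    allow_feedback=True,   # receive and process feedback during execution
    allow_conflict=True,   # handle conflicts with other agents
    allow_iteration=True,  # iterate on solutions based on feedback/validation
)

# The three flags are then forwarded to CrewAgentExecutor together with the
# existing executor keyword arguments, as in the hunk ending above.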

View File

@@ -31,6 +31,34 @@ class ToolResult:
class CrewAgentExecutor(CrewAgentExecutorMixin):
"""CrewAgentExecutor class for managing agent execution.
This class is responsible for executing agent tasks, handling tools,
managing agent interactions, and processing the results.
Parameters:
llm: The language model to use for generating responses.
task: The task to be executed.
crew: The crew that the agent belongs to.
agent: The agent to execute the task.
prompt: The prompt to use for generating responses.
max_iter: Maximum number of iterations for the agent execution.
tools: The tools available to the agent.
tools_names: The names of the tools available to the agent.
stop_words: Words that signal the end of agent execution.
tools_description: Description of the tools available to the agent.
tools_handler: Handler for tool operations.
step_callback: Callback function for each step of execution.
original_tools: Original list of tools before processing.
function_calling_llm: LLM specifically for function calling.
respect_context_window: Whether to respect the context window size.
request_within_rpm_limit: Function to check if request is within RPM limit.
callbacks: List of callback functions.
allow_feedback: Whether to receive and process feedback during execution.
allow_conflict: Whether to handle conflicts with other agents during execution.
allow_iteration: Whether to iterate on solutions based on feedback and validation.
"""
_logger: Logger = Logger()
def __init__(
@@ -52,6 +80,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
respect_context_window: bool = False,
request_within_rpm_limit: Any = None,
callbacks: List[Any] = [],
allow_feedback: bool = False,
allow_conflict: bool = False,
allow_iteration: bool = False,
):
self._i18n: I18N = I18N()
self.llm = llm
@@ -73,6 +104,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self.function_calling_llm = function_calling_llm
self.respect_context_window = respect_context_window
self.request_within_rpm_limit = request_within_rpm_limit
self.allow_feedback = allow_feedback
self.allow_conflict = allow_conflict
self.allow_iteration = allow_iteration
self.ask_for_human_input = False
self.messages: List[Dict[str, str]] = []
self.iterations = 0
@@ -487,3 +521,56 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self.ask_for_human_input = False
return formatted_answer
def process_feedback(self, feedback: str) -> bool:
"""
Process feedback for the agent if feedback mode is enabled.
Parameters:
feedback (str): The feedback to process.
Returns:
bool: True if the feedback was processed successfully, False otherwise.
"""
if not self.allow_feedback:
self._logger.log("warning", "Feedback processing skipped (allow_feedback=False).", color="yellow")
return False
self._logger.log("info", f"Processing feedback: {feedback}", color="green")
# Add feedback to messages
self.messages.append(self._format_msg(f"Feedback: {feedback}"))
return True
def handle_conflict(self, other_agent: 'CrewAgentExecutor') -> bool:
"""
Handle conflict with another agent if conflict handling is enabled.
Parameters:
other_agent (CrewAgentExecutor): The other agent involved in the conflict.
Returns:
bool: True if the conflict was handled successfully, False otherwise.
"""
if not self.allow_conflict:
self._logger.log("warning", "Conflict handling skipped (allow_conflict=False).", color="yellow")
return False
self._logger.log("info", f"Handling conflict with agent: {other_agent.agent.role}", color="green")
return True
def process_iteration(self, result: Any) -> bool:
"""
Process iteration based on result if iteration mode is enabled.
Parameters:
result (Any): The result to iterate on.
Returns:
bool: True if the iteration was processed successfully, False otherwise.
"""
if not self.allow_iteration:
self._logger.log("warning", "Iteration processing skipped (allow_iteration=False).", color="yellow")
return False
self._logger.log("info", "Processing iteration on result.", color="green")
return True
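A rough sketch of calling the new executor hooks directly. The executor instances are assumed to already exist with the flags enabled; only the method names, signatures, and return behaviour come from the methods above.

# Assumes executor1 and executor2 are CrewAgentExecutor instances built
# with allow_feedback / allow_conflict / allow_iteration set to True.
processed = executor1.process_feedback("Tighten the summary to three sentences.")
# True: the feedback was logged and appended to executor1.messages.

resolved = executor1.handle_conflict(executor2)
# True: the conflict with executor2's agent was logged as handled.

iterated = executor1.process_iteration("draft result")
# True: the iteration on the given result was logged as processed.

# With the flags left at their False defaults, each call logs a warning
# and returns False without doing anything else.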

View File

@@ -16,12 +16,6 @@ def after_kickoff(func):
def task(func):
"""Decorator to mark a method as a task creator.
When applied to a method in a class decorated with @CrewBase,
this makes the method's return value accessible as an element
of the self.tasks list.
"""
func.is_task = True
@wraps(func)
@@ -35,12 +29,6 @@ def task(func):
def agent(func):
"""Decorator to mark a method as an agent creator.
When applied to a method in a class decorated with @CrewBase,
this makes the method's return value accessible as an element
of the self.agents list.
"""
func.is_agent = True
func = memoize(func)
return func
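For context, a minimal sketch of how these decorators are typically applied inside a @CrewBase class. The class body is illustrative, and the sketch assumes the usual config/agents.yaml and config/tasks.yaml lookups resolve; only the decorator behaviour (setting is_task / is_agent and memoizing @agent methods) comes from the code above.

from crewai import Agent, Task
from crewai.project import CrewBase, agent, task

@CrewBase
class ResearchCrew:
    @agent  # sets researcher.is_agent = True and memoizes the method
    def researcher(self) -> Agent:
        return Agent(role="Researcher", goal="Find sources", backstory="Placeholder")

    @task  # sets research_task.is_task = True
    def research_task(self) -> Task:
        return Task(description="Summarize findings", expected_output="A short report")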

View File

@@ -1,6 +1,6 @@
import inspect
from pathlib import Path
from typing import Any, Callable, Dict, List, TypeVar, cast
from typing import Any, Callable, Dict, TypeVar, cast
import yaml
from dotenv import load_dotenv
@@ -66,9 +66,6 @@ def CrewBase(cls: T) -> T:
self._kickoff = self._filter_functions(
self._original_functions, "is_kickoff"
)
self.agents = [] # type: List[Any]
self.tasks = [] # type: List[Any]
@staticmethod
def load_yaml(config_path: Path):

View File

@@ -41,7 +41,6 @@ from crewai.tools.base_tool import BaseTool
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.i18n import I18N
from crewai.utilities.typing import TaskConfig
class Task(BaseModel):
@@ -75,7 +74,7 @@ class Task(BaseModel):
expected_output: str = Field(
description="Clear definition of expected output for the task."
)
config: Optional[Union[Dict[str, Any], TaskConfig]] = Field(
config: Optional[Dict[str, Any]] = Field(
description="Configuration for the agent",
default=None,
)
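With TaskConfig removed, Task.config is a plain dictionary. A brief sketch under that assumption; the values are illustrative, and the merge of config keys into task fields is the same behaviour exercised by the removed typed-config test further below.

from crewai import Agent, Task

writer = Agent(role="Writer", goal="Write summaries", backstory="Placeholder")

# config keys are merged into the Task fields via process_config.
summary_task = Task(
    config={
        "description": "Summarize the research findings",
        "expected_output": "A one-paragraph summary",
    },
    agent=writer,
)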

View File

@@ -1,14 +0,0 @@
from typing import Dict, List, Optional, Any, TypedDict, Union
class AgentConfig(TypedDict, total=False):
"""TypedDict for agent configuration loaded from YAML."""
role: str
goal: str
backstory: str
verbose: bool
class TaskConfig(TypedDict, total=False):
"""TypedDict for task configuration loaded from YAML."""
description: str
expected_output: str
agent: str # Role of the agent to execute this task

View File

@@ -1625,3 +1625,127 @@ def test_agent_with_knowledge_sources():
# Assert that the agent provides the correct information
assert "red" in result.raw.lower()
def test_agent_with_feedback_conflict_iteration_params():
"""Test that the agent correctly handles the allow_feedback, allow_conflict, and allow_iteration parameters."""
agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
allow_feedback=True,
allow_conflict=True,
allow_iteration=True,
)
assert agent.allow_feedback is True
assert agent.allow_conflict is True
assert agent.allow_iteration is True
# Create another agent with default values
default_agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
)
assert default_agent.allow_feedback is False
assert default_agent.allow_conflict is False
assert default_agent.allow_iteration is False
def test_agent_feedback_processing():
"""Test that the agent correctly processes feedback when allow_feedback is enabled."""
from unittest.mock import patch, MagicMock
# Create a mock CrewAgentExecutor
mock_executor = MagicMock()
mock_executor.allow_feedback = True
mock_executor.process_feedback.return_value = True
# Patch Agent.create_agent_executor so no real executor is built
with patch('crewai.agent.Agent.create_agent_executor', return_value=mock_executor):
# Create an agent with allow_feedback=True
agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
allow_feedback=True,
llm=MagicMock() # Mock LLM to avoid API calls
)
executor = agent.create_agent_executor()
assert executor.allow_feedback is True
result = executor.process_feedback("Test feedback")
assert result is True
executor.process_feedback.assert_called_once_with("Test feedback")
def test_agent_conflict_handling():
"""Test that the agent correctly handles conflicts when allow_conflict is enabled."""
from unittest.mock import patch, MagicMock
mock_executor1 = MagicMock()
mock_executor1.allow_conflict = True
mock_executor1.handle_conflict.return_value = True
mock_executor2 = MagicMock()
mock_executor2.allow_conflict = True
with patch('crewai.agent.Agent.create_agent_executor', return_value=mock_executor1):
# Create agents with allow_conflict=True
agent1 = Agent(
role="role1",
goal="goal1",
backstory="backstory1",
allow_conflict=True,
llm=MagicMock() # Mock LLM to avoid API calls
)
agent2 = Agent(
role="role2",
goal="goal2",
backstory="backstory2",
allow_conflict=True,
llm=MagicMock() # Mock LLM to avoid API calls
)
# Get the executors
executor1 = agent1.create_agent_executor()
executor2 = agent2.create_agent_executor()
assert executor1.allow_conflict is True
assert executor2.allow_conflict is True
result = executor1.handle_conflict(executor2)
assert result is True
executor1.handle_conflict.assert_called_once_with(executor2)
def test_agent_iteration_processing():
"""Test that the agent correctly processes iterations when allow_iteration is enabled."""
from unittest.mock import patch, MagicMock
# Create a mock CrewAgentExecutor
mock_executor = MagicMock()
mock_executor.allow_iteration = True
mock_executor.process_iteration.return_value = True
# Patch Agent.create_agent_executor so no real executor is built
with patch('crewai.agent.Agent.create_agent_executor', return_value=mock_executor):
# Create an agent with allow_iteration=True
agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
allow_iteration=True,
llm=MagicMock() # Mock LLM to avoid API calls
)
executor = agent.create_agent_executor()
assert executor.allow_iteration is True
result = executor.process_iteration("Test result")
assert result is True
executor.process_iteration.assert_called_once_with("Test result")

View File

@@ -1,55 +0,0 @@
from typing import Dict, Any
import pytest
from crewai.agent import Agent
from crewai.task import Task
from crewai.utilities.typing import AgentConfig, TaskConfig
def test_agent_with_config_dict():
config: AgentConfig = {
"role": "Test Agent",
"goal": "Test Goal",
"backstory": "Test Backstory",
"verbose": True
}
agent = Agent(config=config)
assert agent.role == "Test Agent"
assert agent.goal == "Test Goal"
assert agent.backstory == "Test Backstory"
assert agent.verbose is True
def test_agent_with_yaml_config():
config: Dict[str, Any] = {
"researcher": {
"role": "Researcher",
"goal": "Research Goal",
"backstory": "Researcher Backstory",
"verbose": True
}
}
agent = Agent(config=config["researcher"])
assert agent.role == "Researcher"
assert agent.goal == "Research Goal"
assert agent.backstory == "Researcher Backstory"
def test_task_with_config_dict():
config: TaskConfig = {
"description": "Test Task",
"expected_output": "Test Output",
"agent": "researcher"
}
agent = Agent(role="Researcher", goal="Goal", backstory="Backstory")
task = Task(config=config, agent=agent)
assert task.description == "Test Task"
assert task.expected_output == "Test Output"
assert task.agent == agent