Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-04-29 14:22:36 +00:00)

Compare commits: devin/1747...devin/1757 (1 commit)

Commit 3bfa1c6559
@@ -9,7 +9,6 @@ from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.cli.constants import ENV_VARS, LITELLM_PARAMS
from crewai.utilities import Logger
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
@@ -63,12 +62,8 @@ class Agent(BaseAgent):
        tools: Tools at the agent's disposal
        step_callback: Callback to be executed after each step of the agent execution.
        knowledge_sources: Knowledge sources for the agent.
        allow_feedback: Whether the agent can receive and process feedback during execution.
        allow_conflict: Whether the agent can handle conflicts with other agents during execution.
        allow_iteration: Whether the agent can iterate on its solutions based on feedback and validation.
    """

    _logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))

    _times_executed: int = PrivateAttr(default=0)
    max_execution_time: Optional[int] = Field(
        default=None,
@@ -128,18 +123,6 @@ class Agent(BaseAgent):
        default="safe",
        description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
    )
    allow_feedback: bool = Field(
        default=False,
        description="Enable agent to receive and process feedback during execution.",
    )
    allow_conflict: bool = Field(
        default=False,
        description="Enable agent to handle conflicts with other agents during execution.",
    )
    allow_iteration: bool = Field(
        default=False,
        description="Enable agent to iterate on its solutions based on feedback and validation.",
    )
    embedder_config: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Embedder configuration for the agent.",
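For reference, a minimal construction sketch showing how these three flags would be enabled on an Agent, assuming a checkout that still carries the allow_feedback, allow_conflict, and allow_iteration fields shown above; the role, goal, and backstory values are placeholders:

from crewai import Agent

agent = Agent(
    role="researcher",
    goal="collect sources",
    backstory="placeholder backstory",
    allow_feedback=True,   # receive and process feedback during execution
    allow_conflict=True,   # handle conflicts with other agents
    allow_iteration=True,  # iterate on solutions based on feedback and validation
)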
@@ -156,19 +139,6 @@ class Agent(BaseAgent):
    def post_init_setup(self):
        self._set_knowledge()
        self.agent_ops_agent_name = self.role

        if self.allow_feedback:
            self._logger.log("info", "Feedback mode enabled for agent.", color="bold_green")
        if self.allow_conflict:
            self._logger.log("info", "Conflict handling enabled for agent.", color="bold_green")
        if self.allow_iteration:
            self._logger.log("info", "Iteration mode enabled for agent.", color="bold_green")

        # Validate boolean parameters
        for param in ['allow_feedback', 'allow_conflict', 'allow_iteration']:
            if not isinstance(getattr(self, param), bool):
                raise ValueError(f"Parameter '{param}' must be a boolean value.")

        unaccepted_attributes = [
            "AWS_ACCESS_KEY_ID",
            "AWS_SECRET_ACCESS_KEY",
@@ -430,9 +400,6 @@ class Agent(BaseAgent):
            step_callback=self.step_callback,
            function_calling_llm=self.function_calling_llm,
            respect_context_window=self.respect_context_window,
            allow_feedback=self.allow_feedback,
            allow_conflict=self.allow_conflict,
            allow_iteration=self.allow_iteration,
            request_within_rpm_limit=(
                self._rpm_controller.check_or_wait if self._rpm_controller else None
            ),
@@ -31,34 +31,6 @@ class ToolResult:


class CrewAgentExecutor(CrewAgentExecutorMixin):
    """CrewAgentExecutor class for managing agent execution.

    This class is responsible for executing agent tasks, handling tools,
    managing agent interactions, and processing the results.

    Parameters:
        llm: The language model to use for generating responses.
        task: The task to be executed.
        crew: The crew that the agent belongs to.
        agent: The agent to execute the task.
        prompt: The prompt to use for generating responses.
        max_iter: Maximum number of iterations for the agent execution.
        tools: The tools available to the agent.
        tools_names: The names of the tools available to the agent.
        stop_words: Words that signal the end of agent execution.
        tools_description: Description of the tools available to the agent.
        tools_handler: Handler for tool operations.
        step_callback: Callback function for each step of execution.
        original_tools: Original list of tools before processing.
        function_calling_llm: LLM specifically for function calling.
        respect_context_window: Whether to respect the context window size.
        request_within_rpm_limit: Function to check if request is within RPM limit.
        callbacks: List of callback functions.
        allow_feedback: Controls feedback processing during execution.
        allow_conflict: Enables conflict handling between agents.
        allow_iteration: Allows solution iteration based on feedback.
    """

    _logger: Logger = Logger()

    def __init__(
@@ -80,9 +52,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        respect_context_window: bool = False,
        request_within_rpm_limit: Any = None,
        callbacks: List[Any] = [],
        allow_feedback: bool = False,
        allow_conflict: bool = False,
        allow_iteration: bool = False,
    ):
        self._i18n: I18N = I18N()
        self.llm = llm
@@ -104,9 +73,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        self.function_calling_llm = function_calling_llm
        self.respect_context_window = respect_context_window
        self.request_within_rpm_limit = request_within_rpm_limit
        self.allow_feedback = allow_feedback
        self.allow_conflict = allow_conflict
        self.allow_iteration = allow_iteration
        self.ask_for_human_input = False
        self.messages: List[Dict[str, str]] = []
        self.iterations = 0
@@ -146,6 +112,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        try:
            while not isinstance(formatted_answer, AgentFinish):
                if not self.request_within_rpm_limit or self.request_within_rpm_limit():
                    self._check_context_length_before_call()

                    answer = self.llm.call(
                        self.messages,
                        callbacks=self.callbacks,
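In the hunk above, request_within_rpm_limit is either None or a zero-argument callable that returns a truthy value once the next request fits the rate limit; Agent.create_agent_executor wires it to self._rpm_controller.check_or_wait. A minimal sketch of that contract, with a hypothetical SimpleRPMLimiter standing in for crewAI's RPM controller:

import time


class SimpleRPMLimiter:
    """Hypothetical stand-in for crewAI's RPM controller; only the callable contract matters."""

    def __init__(self, max_rpm: int):
        self.max_rpm = max_rpm
        self.window_start = time.monotonic()
        self.calls = 0

    def check_or_wait(self) -> bool:
        elapsed = time.monotonic() - self.window_start
        if elapsed >= 60:
            # New minute: reset the budget.
            self.window_start = time.monotonic()
            self.calls = 0
        elif self.calls >= self.max_rpm:
            # Budget spent: sleep out the rest of the minute, then reset.
            time.sleep(60 - elapsed)
            self.window_start = time.monotonic()
            self.calls = 0
        self.calls += 1
        return True


request_within_rpm_limit = SimpleRPMLimiter(max_rpm=10).check_or_wait

# Mirrors the guard in the executor loop: proceed when no limiter is configured
# or when the limiter reports the request fits the current minute.
if not request_within_rpm_limit or request_within_rpm_limit():
    pass  # the LLM call would happen here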
@@ -361,6 +329,19 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
            )
        ]

    def _check_context_length_before_call(self) -> None:
        total_chars = sum(len(msg.get("content", "")) for msg in self.messages)
        estimated_tokens = total_chars // 4

        context_window_size = self.llm.get_context_window_size()

        if estimated_tokens > context_window_size:
            self._printer.print(
                content=f"Estimated token count ({estimated_tokens}) exceeds context window ({context_window_size}). Handling proactively.",
                color="yellow",
            )
            self._handle_context_length()

    def _handle_context_length(self) -> None:
        if self.respect_context_window:
            self._printer.print(
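The new _check_context_length_before_call estimates token usage with a rough four-characters-per-token heuristic and triggers _handle_context_length before the LLM is called. A self-contained sketch of the same arithmetic; estimate_tokens is a hypothetical helper, not part of the diff:

def estimate_tokens(messages: list[dict[str, str]]) -> int:
    """Rough heuristic used above: roughly four characters per token."""
    total_chars = sum(len(msg.get("content", "")) for msg in messages)
    return total_chars // 4


messages = [{"role": "user", "content": "x" * 1200}]
# 1200 characters // 4 gives 300 estimated tokens; against a 100-token window this would trigger handling.
assert estimate_tokens(messages) == 300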
@@ -521,56 +502,3 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        self.ask_for_human_input = False

        return formatted_answer

    def process_feedback(self, feedback: str) -> bool:
        """
        Process feedback for the agent if feedback mode is enabled.

        Parameters:
            feedback (str): The feedback to process.

        Returns:
            bool: True if the feedback was processed successfully, False otherwise.
        """
        if not self.allow_feedback:
            self._logger.log("warning", "Feedback processing skipped (allow_feedback=False).", color="yellow")
            return False

        self._logger.log("info", f"Processing feedback: {feedback}", color="green")
        # Add feedback to messages
        self.messages.append(self._format_msg(f"Feedback: {feedback}"))
        return True

    def handle_conflict(self, other_agent: 'CrewAgentExecutor') -> bool:
        """
        Handle conflict with another agent if conflict handling is enabled.

        Parameters:
            other_agent (CrewAgentExecutor): The other agent involved in the conflict.

        Returns:
            bool: True if the conflict was handled successfully, False otherwise.
        """
        if not self.allow_conflict:
            self._logger.log("warning", "Conflict handling skipped (allow_conflict=False).", color="yellow")
            return False

        self._logger.log("info", f"Handling conflict with agent: {other_agent.agent.role}", color="green")
        return True

    def process_iteration(self, result: Any) -> bool:
        """
        Process iteration based on result if iteration mode is enabled.

        Parameters:
            result (Any): The result to iterate on.

        Returns:
            bool: True if the iteration was processed successfully, False otherwise.
        """
        if not self.allow_iteration:
            self._logger.log("warning", "Iteration processing skipped (allow_iteration=False).", color="yellow")
            return False

        self._logger.log("info", "Processing iteration on result.", color="green")
        return True
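The three methods above share one guard pattern: if the corresponding flag is off they log a warning and return False, otherwise they act and return True. A minimal sketch of that pattern using a hypothetical stand-in class rather than a real CrewAgentExecutor (which takes many constructor arguments omitted here):

class FeedbackGuardSketch:
    """Hypothetical stand-in illustrating the allow_feedback guard; not crewAI code."""

    def __init__(self, allow_feedback: bool):
        self.allow_feedback = allow_feedback
        self.messages: list[dict[str, str]] = []

    def process_feedback(self, feedback: str) -> bool:
        if not self.allow_feedback:
            return False  # mirrors the warning-and-return-False branch above
        self.messages.append({"role": "user", "content": f"Feedback: {feedback}"})
        return True


assert FeedbackGuardSketch(allow_feedback=True).process_feedback("Use bullet points") is True
assert FeedbackGuardSketch(allow_feedback=False).process_feedback("Use bullet points") is False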
@@ -1627,125 +1627,76 @@ def test_agent_with_knowledge_sources():
    assert "red" in result.raw.lower()


def test_agent_with_feedback_conflict_iteration_params():
    """Test that the agent correctly handles the allow_feedback, allow_conflict, and allow_iteration parameters."""
def test_proactive_context_length_handling_prevents_empty_response():
    """Test that proactive context length checking prevents empty LLM responses."""
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        sliding_context_window=True,
    )

    long_input = "This is a very long input that should exceed the context window. " * 1000

    with patch.object(agent.llm, 'get_context_window_size', return_value=100):
        with patch.object(agent.agent_executor, '_handle_context_length') as mock_handle:
            with patch.object(agent.llm, 'call', return_value="Proper response after summarization"):

                agent.agent_executor.messages = [
                    {"role": "user", "content": long_input}
                ]

                task = Task(
                    description="Process this long input",
                    expected_output="A response",
                    agent=agent,
                )

                result = agent.execute_task(task)

                mock_handle.assert_called()
                assert result and result.strip() != ""

def test_proactive_context_length_handling_with_no_summarization():
    """Test proactive context length checking when summarization is disabled."""
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        sliding_context_window=False,
    )

    long_input = "This is a very long input. " * 1000

    with patch.object(agent.llm, 'get_context_window_size', return_value=100):
        agent.agent_executor.messages = [
            {"role": "user", "content": long_input}
        ]

        with pytest.raises(SystemExit):
            agent.agent_executor._check_context_length_before_call()

def test_context_length_estimation():
    """Test the token estimation logic."""
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        allow_feedback=True,
        allow_conflict=True,
        allow_iteration=True,
    )

    assert agent.allow_feedback is True
    assert agent.allow_conflict is True
    assert agent.allow_iteration is True

    # Create another agent with default values
    default_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
    )

    assert default_agent.allow_feedback is False
    assert default_agent.allow_conflict is False
    assert default_agent.allow_iteration is False

def test_agent_feedback_processing():
    """Test that the agent correctly processes feedback when allow_feedback is enabled."""
    from unittest.mock import patch, MagicMock
    agent.agent_executor.messages = [
        {"role": "user", "content": "Short message"},
        {"role": "assistant", "content": "Another short message"},
    ]

    # Create a mock CrewAgentExecutor
    mock_executor = MagicMock()
    mock_executor.allow_feedback = True
    mock_executor.process_feedback.return_value = True
    with patch.object(agent.llm, 'get_context_window_size', return_value=10):
        with patch.object(agent.agent_executor, '_handle_context_length') as mock_handle:
            agent.agent_executor._check_context_length_before_call()
            mock_handle.assert_not_called()

    # Mock the create_agent_executor method at the module level
    with patch('crewai.agent.Agent.create_agent_executor', return_value=mock_executor):
        # Create an agent with allow_feedback=True
        agent = Agent(
            role="test role",
            goal="test goal",
            backstory="test backstory",
            allow_feedback=True,
            llm=MagicMock()  # Mock LLM to avoid API calls
        )

        executor = agent.create_agent_executor()
        assert executor.allow_feedback is True

        result = executor.process_feedback("Test feedback")
        assert result is True
        executor.process_feedback.assert_called_once_with("Test feedback")

def test_agent_conflict_handling():
    """Test that the agent correctly handles conflicts when allow_conflict is enabled."""
    from unittest.mock import patch, MagicMock

    mock_executor1 = MagicMock()
    mock_executor1.allow_conflict = True
    mock_executor1.handle_conflict.return_value = True

    mock_executor2 = MagicMock()
    mock_executor2.allow_conflict = True

    with patch('crewai.agent.Agent.create_agent_executor', return_value=mock_executor1):
        # Create agents with allow_conflict=True
        agent1 = Agent(
            role="role1",
            goal="goal1",
            backstory="backstory1",
            allow_conflict=True,
            llm=MagicMock()  # Mock LLM to avoid API calls
        )

        agent2 = Agent(
            role="role2",
            goal="goal2",
            backstory="backstory2",
            allow_conflict=True,
            llm=MagicMock()  # Mock LLM to avoid API calls
        )

        # Get the executors
        executor1 = agent1.create_agent_executor()
        executor2 = agent2.create_agent_executor()

        assert executor1.allow_conflict is True
        assert executor2.allow_conflict is True

        result = executor1.handle_conflict(executor2)
        assert result is True
        executor1.handle_conflict.assert_called_once_with(executor2)

def test_agent_iteration_processing():
    """Test that the agent correctly processes iterations when allow_iteration is enabled."""
    from unittest.mock import patch, MagicMock

    # Create a mock CrewAgentExecutor
    mock_executor = MagicMock()
    mock_executor.allow_iteration = True
    mock_executor.process_iteration.return_value = True

    # Mock the create_agent_executor method at the module level
    with patch('crewai.agent.Agent.create_agent_executor', return_value=mock_executor):
        # Create an agent with allow_iteration=True
        agent = Agent(
            role="test role",
            goal="test goal",
            backstory="test backstory",
            allow_iteration=True,
            llm=MagicMock()  # Mock LLM to avoid API calls
        )

        executor = agent.create_agent_executor()
        assert executor.allow_iteration is True

        result = executor.process_iteration("Test result")
        assert result is True
        executor.process_iteration.assert_called_once_with("Test result")
    with patch.object(agent.llm, 'get_context_window_size', return_value=5):
        with patch.object(agent.agent_executor, '_handle_context_length') as mock_handle:
            agent.agent_executor._check_context_length_before_call()
            mock_handle.assert_called()