Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-10 08:38:30 +00:00)
feat: Add feedback validation and error handling
- Add FeedbackProcessingError for feedback handling
- Add validation for empty and long feedback messages
- Add test coverage for edge cases

Co-Authored-By: Joe Moura <joao@crewai.com>
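The new FeedbackProcessingError is imported below, but its definition is not part of this diff. A minimal sketch of what crewai/utilities/exceptions/feedback_processing_exception.py likely contains (an assumption based on the import path and usage, not code from this commit):

    # Hypothetical module: crewai/utilities/exceptions/feedback_processing_exception.py
    # Sketch only -- the commit does not show this file.
    class FeedbackProcessingError(Exception):
        """Raised when human feedback cannot be processed (empty or too long)."""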
@@ -18,6 +18,7 @@ from crewai.tools.base_tool import BaseTool
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.utilities import I18N, Printer
 from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
+from crewai.utilities.exceptions.feedback_processing_exception import FeedbackProcessingError
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
     LLMContextLengthExceededException,
 )
@@ -487,6 +488,23 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         return CrewAgentParser(agent=self.agent).parse(answer)
 
+    def _format_msg(self, prompt: str, role: str = "user") -> Dict[str, str]:
+        """Format a message with role and content.
+
+        Args:
+            prompt (str): The message content
+            role (str): The message role (default: "user")
+
+        Returns:
+            Dict[str, str]: Formatted message with role and content
+
+        Raises:
+            FeedbackProcessingError: If prompt is empty or exceeds max length
+        """
+        if not prompt or not prompt.strip():
+            raise FeedbackProcessingError("Feedback message cannot be empty")
+        if len(prompt) > 8192:  # Standard context window size
+            raise FeedbackProcessingError("Feedback message exceeds maximum length")
+
+        prompt = prompt.rstrip()
+        return {"role": role, "content": prompt}
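For illustration, here is how the new validation behaves (a hypothetical snippet, assuming an already-constructed CrewAgentExecutor named executor; not part of the commit):

    from crewai.utilities.exceptions.feedback_processing_exception import FeedbackProcessingError

    # Whitespace-only feedback is rejected, not just the empty string.
    try:
        executor._format_msg("   ")
    except FeedbackProcessingError as e:
        print(e)  # Feedback message cannot be empty

    # Feedback longer than 8192 characters is rejected.
    try:
        executor._format_msg("x" * 10_000)
    except FeedbackProcessingError as e:
        print(e)  # Feedback message exceeds maximum length

    # Valid feedback is trimmed of trailing whitespace and wrapped with a role.
    print(executor._format_msg("Looks good  "))  # {'role': 'user', 'content': 'Looks good'}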
@@ -2,13 +2,14 @@
 
 import os
 from unittest import mock
-from unittest.mock import patch
+from unittest.mock import patch, MagicMock
 
 import pytest
 
 from crewai import Agent, Crew, Task
 from crewai.agents.cache import CacheHandler
 from crewai.agents.crew_agent_executor import CrewAgentExecutor
+from crewai.utilities.exceptions.feedback_processing_exception import FeedbackProcessingError
 from crewai.agents.cache import CacheHandler
 from crewai.agents.parser import AgentAction, CrewAgentParser, OutputParserException
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
 from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
@@ -1001,6 +1002,53 @@ def test_agent_human_input():
     assert mock_human_input.call_count == 2  # Should have asked for feedback twice
     assert output.strip().lower() == "hello"  # Final output should be 'Hello'
 
+    # Verify message format for human feedback
+    messages = agent.agent_executor.messages
+    feedback_messages = [m for m in messages if "Feedback:" in m.get("content", "")]
+    assert len(feedback_messages) == 2  # Two feedback messages
+    for msg in feedback_messages:
+        assert msg["role"] == "user"  # All feedback messages should have user role
+
+
+@pytest.fixture
+def mock_executor():
+    """Create a mock executor for testing."""
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory"
+    )
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        human_input=True,
+        agent=agent
+    )
+    executor = CrewAgentExecutor(
+        agent=agent,
+        task=task,
+        llm=agent.llm,
+        crew=None,
+        prompt="",
+        max_iter=1,
+        tools=[],
+        tools_names=[],
+        stop_words=[],
+        tools_description="",
+        tools_handler=None
+    )
+    return executor
+
+
+def test_empty_feedback_handling(mock_executor):
+    """Test that empty feedback is properly handled."""
+    with pytest.raises(FeedbackProcessingError):
+        mock_executor._format_msg("")
+
+
+def test_long_feedback_handling(mock_executor):
+    """Test that very long feedback is properly handled."""
+    very_long_feedback = "x" * 10000
+    with pytest.raises(FeedbackProcessingError):
+        mock_executor._format_msg(very_long_feedback)
+
+
 def test_interpolate_inputs():
     agent = Agent(
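One edge the new tests do not pin down is the exact 8192-character boundary: the check is len(prompt) > 8192, so a message of exactly 8192 characters should pass. A possible additional test, sketched here as a suggestion rather than part of the commit:

    def test_feedback_length_boundary(mock_executor):
        """Exactly 8192 characters is allowed; 8193 is rejected."""
        assert mock_executor._format_msg("x" * 8192)["content"] == "x" * 8192
        with pytest.raises(FeedbackProcessingError):
            mock_executor._format_msg("x" * 8193)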