Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-16 04:18:35 +00:00.
- Add 5 new Agent configuration fields:
  - compact_mode: Enable compact prompt mode to reduce context size
  - tools_prompt_strategy: Choose between 'full' or 'names_only' for tools
  - proactive_context_trimming: Enable proactive message trimming
  - memory_max_chars: Cap memory context length
  - knowledge_max_chars: Cap knowledge context length
- Implement compact prompt mode in utilities/prompts.py
  - Caps role to 100 chars, goal to 150 chars
  - Omits backstory entirely in compact mode
- Implement tools_prompt_strategy in Agent.create_agent_executor
  - 'names_only' uses get_tool_names for minimal tool descriptions
  - 'full' uses render_text_description_and_args (default)
- Implement memory/knowledge size bounds in Agent.execute_task
  - Truncates memory and knowledge contexts when limits are set
- Add trim_messages_structurally helper in agent_utils.py
  - Structural trimming without LLM calls
  - Keeps system messages and last N message pairs
- Integrate proactive trimming in CrewAgentExecutor._invoke_loop
  - Trims messages before each LLM call when enabled
- Update LangGraphAdapter and OpenAIAdapter to honor compact_mode
  - Compacts role/goal/backstory in system prompts
- Add comprehensive tests for all new features:
  - test_prompts_compact_mode.py
  - test_memory_knowledge_truncation.py
  - test_tools_prompt_strategy.py
  - test_proactive_context_trimming.py

All changes are opt-in with conservative defaults to maintain backward compatibility.

Fixes #3912

Co-Authored-By: João <joao@crewai.com>
86 lines
2.5 KiB
Python
86 lines
2.5 KiB
Python
"""Tests for compact mode in prompt generation."""
|
|
|
|
from unittest.mock import Mock
|
|
|
|
import pytest
|
|
|
|
from crewai.utilities.prompts import Prompts
|
|
|
|
|
|
def test_prompts_compact_mode_shortens_role():
    """Verify compact mode truncates an oversized role to 100 characters.

    A 200-character role should appear in the rendered prompt as its first
    97 characters plus an ellipsis, and never at its full length.
    """
    mock_agent = Mock(
        role="A" * 200,
        goal="Test goal",
        backstory="Test backstory",
        compact_mode=True,
    )

    rendered = Prompts(agent=mock_agent, has_tools=False)._build_prompt(["role_playing"])

    # The source attribute itself is untouched; only the rendered prompt is capped.
    assert len(mock_agent.role) == 200
    assert "A" * 97 + "..." in rendered
    assert "A" * 100 not in rendered
|
|
|
|
|
|
def test_prompts_compact_mode_shortens_goal():
    """Verify compact mode truncates an oversized goal to 150 characters.

    A 200-character goal should appear in the rendered prompt as its first
    147 characters plus an ellipsis, and never at its full length.
    """
    mock_agent = Mock(
        role="Test role",
        goal="B" * 200,
        backstory="Test backstory",
        compact_mode=True,
    )

    rendered = Prompts(agent=mock_agent, has_tools=False)._build_prompt(["role_playing"])

    # The source attribute itself is untouched; only the rendered prompt is capped.
    assert len(mock_agent.goal) == 200
    assert "B" * 147 + "..." in rendered
    assert "B" * 150 not in rendered
|
|
|
|
|
|
def test_prompts_compact_mode_omits_backstory():
    """Test that compact mode omits the agent's backstory entirely.

    With compact_mode enabled, the backstory text must not appear anywhere
    in the rendered prompt.
    """
    agent = Mock()
    agent.role = "Test role"
    agent.goal = "Test goal"
    agent.backstory = "This is a very long backstory that should be omitted in compact mode"
    agent.compact_mode = True

    prompts = Prompts(agent=agent, has_tools=False)
    result = prompts._build_prompt(["role_playing"])

    # The previous assertion ("backstory" not in result.lower() or
    # result.count("{backstory}") > 0) passed vacuously whenever the raw
    # "{backstory}" placeholder survived in the output, and could fail even
    # when omission worked if the template merely contained the word
    # "backstory". Assert the actual contract: the backstory text itself
    # never reaches the rendered prompt.
    assert agent.backstory not in result
|
|
|
|
|
|
def test_prompts_normal_mode_preserves_full_content():
    """Verify that with compact_mode=False the prompt keeps everything.

    The full 200-character role, goal, and backstory must each appear
    verbatim in the rendered prompt.
    """
    mock_agent = Mock(
        role="A" * 200,
        goal="B" * 200,
        backstory="C" * 200,
        compact_mode=False,
    )

    rendered = Prompts(agent=mock_agent, has_tools=False)._build_prompt(["role_playing"])

    # Nothing is truncated or omitted in normal mode.
    for marker in ("A" * 200, "B" * 200, "C" * 200):
        assert marker in rendered
|
|
|
|
|
|
def test_prompts_compact_mode_default_false():
    """Verify compact mode is effectively off when the attribute is absent.

    An agent that never defined compact_mode must get the full,
    untruncated role, goal, and backstory in its prompt.
    """
    mock_agent = Mock(
        role="A" * 200,
        goal="B" * 200,
        backstory="C" * 200,
    )
    # Deleting the attribute makes the Mock raise AttributeError on access,
    # simulating an agent object with no compact_mode at all.
    del mock_agent.compact_mode

    rendered = Prompts(agent=mock_agent, has_tools=False)._build_prompt(["role_playing"])

    # With the attribute missing, nothing is truncated or omitted.
    for marker in ("A" * 200, "B" * 200, "C" * 200):
        assert marker in rendered