Compare commits

..

1 Commits

Author SHA1 Message Date
Devin AI
bc5252826e Fix type validation error in hierarchical process delegation (Issue #2606)
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-15 08:54:52 +00:00
4 changed files with 46 additions and 96 deletions

View File

@@ -112,8 +112,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
try:
while not isinstance(formatted_answer, AgentFinish):
if not self.request_within_rpm_limit or self.request_within_rpm_limit():
self._check_context_length_before_call()
answer = self.llm.call(
self.messages,
callbacks=self.callbacks,
@@ -329,19 +327,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
)
]
def _check_context_length_before_call(self) -> None:
total_chars = sum(len(msg.get("content", "")) for msg in self.messages)
estimated_tokens = total_chars // 4
context_window_size = self.llm.get_context_window_size()
if estimated_tokens > context_window_size:
self._printer.print(
content=f"Estimated token count ({estimated_tokens}) exceeds context window ({context_window_size}). Handling proactively.",
color="yellow",
)
self._handle_context_length()
def _handle_context_length(self) -> None:
if self.respect_context_window:
self._printer.print(

View File

@@ -1,4 +1,5 @@
from typing import Optional
import json
from typing import Any, Dict, Optional, Union
from pydantic import BaseModel, Field
@@ -6,8 +7,8 @@ from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool
class DelegateWorkToolSchema(BaseModel):
task: str = Field(..., description="The task to delegate")
context: str = Field(..., description="The context for the task")
task: Union[str, Dict[str, Any]] = Field(..., description="The task to delegate")
context: Union[str, Dict[str, Any]] = Field(..., description="The context for the task")
coworker: str = Field(
..., description="The role/name of the coworker to delegate to"
)
@@ -21,10 +22,12 @@ class DelegateWorkTool(BaseAgentTool):
def _run(
self,
task: str,
context: str,
task: Union[str, Dict[str, Any]],
context: Union[str, Dict[str, Any]],
coworker: Optional[str] = None,
**kwargs,
) -> str:
coworker = self._get_coworker(coworker, **kwargs)
return self._execute(coworker, task, context)
task_str = json.dumps(task) if isinstance(task, dict) else task
context_str = json.dumps(context) if isinstance(context, dict) else context
return self._execute(coworker, task_str, context_str)

View File

@@ -1625,78 +1625,3 @@ def test_agent_with_knowledge_sources():
# Assert that the agent provides the correct information
assert "red" in result.raw.lower()
def test_proactive_context_length_handling_prevents_empty_response():
    """Test that proactive context length checking prevents empty LLM responses."""
    # Agent configured to summarize (slide) its context window on overflow.
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        sliding_context_window=True,
    )
    # ~66 chars x 1000 repetitions — far beyond the patched 100-token window.
    long_input = "This is a very long input that should exceed the context window. " * 1000
    with patch.object(agent.llm, 'get_context_window_size', return_value=100):
        with patch.object(agent.agent_executor, '_handle_context_length') as mock_handle:
            with patch.object(agent.llm, 'call', return_value="Proper response after summarization"):
                # Seed the executor with an oversized conversation history.
                agent.agent_executor.messages = [
                    {"role": "user", "content": long_input}
                ]
                task = Task(
                    description="Process this long input",
                    expected_output="A response",
                    agent=agent,
                )
                result = agent.execute_task(task)
                # The proactive check must have routed through the handler...
                mock_handle.assert_called()
                # ...and the final answer must still be non-empty.
                assert result and result.strip() != ""
def test_proactive_context_length_handling_with_no_summarization():
    """Test proactive context length checking when summarization is disabled."""
    # With sliding_context_window=False the executor cannot shrink the
    # conversation, so an oversized prompt is expected to abort the run.
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        sliding_context_window=False,
    )
    long_input = "This is a very long input. " * 1000
    with patch.object(agent.llm, 'get_context_window_size', return_value=100):
        agent.agent_executor.messages = [
            {"role": "user", "content": long_input}
        ]
        # NOTE(review): the test expects the handler to exit the process when
        # it cannot summarize — confirm SystemExit is the intended contract.
        with pytest.raises(SystemExit):
            agent.agent_executor._check_context_length_before_call()
def test_context_length_estimation():
    """Test the token estimation logic."""
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
    )
    # Two short messages: 13 + 21 characters -> 34 // 4 = 8 estimated tokens.
    agent.agent_executor.messages = [
        {"role": "user", "content": "Short message"},
        {"role": "assistant", "content": "Another short message"},
    ]
    # Window of 10 tokens: the estimate (8) fits, so no handling expected.
    with patch.object(agent.llm, 'get_context_window_size', return_value=10):
        with patch.object(agent.agent_executor, '_handle_context_length') as mock_handle:
            agent.agent_executor._check_context_length_before_call()
            mock_handle.assert_not_called()
    # Window of 5 tokens: the estimate (8) exceeds it, so the handler fires.
    with patch.object(agent.llm, 'get_context_window_size', return_value=5):
        with patch.object(agent.agent_executor, '_handle_context_length') as mock_handle:
            agent.agent_executor._check_context_length_before_call()
            mock_handle.assert_called()

View File

@@ -0,0 +1,37 @@
"""Test delegate work tool with dictionary inputs."""
import pytest
from crewai.agent import Agent
from crewai.tools.agent_tools.agent_tools import AgentTools
# Shared module-level fixture: a single agent the delegation tool can target.
researcher = Agent(
    role="researcher",
    goal="make the best research and analysis on content about AI and AI agents",
    backstory="You're an expert researcher, specialized in technology",
    allow_delegation=False,
)
# AgentTools builds the agent-interaction tools for the given agents; the
# first entry is the delegate-work tool exercised by the test below.
tools = AgentTools(agents=[researcher]).tools()
delegate_tool = tools[0]
@pytest.mark.vcr(filter_headers=["authorization"])
def test_delegate_work_with_dict_input():
    """Delegation must accept dict-shaped task/context and still return text.

    Covers the fix for issue #2606, where structured (dictionary) inputs to
    the delegate-work tool failed schema validation instead of being
    serialized and passed through.
    """
    # Structured task payload instead of a plain string.
    delegated_task = dict(
        description="share your take on AI Agents",
        goal="provide comprehensive analysis",
    )
    # Structured context payload.
    delegated_context = dict(
        background="I heard you hate them",
        additional_info="We need this for a report",
    )
    outcome = delegate_tool.run(
        coworker="researcher",
        task=delegated_task,
        context=delegated_context,
    )
    # A non-empty string response proves the dict inputs were handled.
    assert isinstance(outcome, str)
    assert outcome != ""