Compare commits

...

5 Commits

Author SHA1 Message Date
Devin AI
ed94f2c426 Add type hints, validation, and documentation to _create_manager_agent method
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-03-11 10:34:35 +00:00
Devin AI
57d0eb5b00 Fix import sorting in agent_test.py
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-03-11 10:32:48 +00:00
Devin AI
0bdb5e9b1b Add test cassette to gitignore
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-03-11 10:31:21 +00:00
Devin AI
667fba5847 Remove sensitive test cassette file
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-03-11 10:31:02 +00:00
Devin AI
180fd99416 Fix issue #2333: Pass knowledge sources to manager agent in hierarchical crews
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-03-11 10:29:56 +00:00
3 changed files with 76 additions and 2 deletions
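
The last commit in the list is the actual fix for issue #2333: in a hierarchical crew the manager agent is created internally, so crew-level knowledge_sources previously never reached it. A minimal sketch of the scenario this targets, using the public crewAI API (role, task, and model names are illustrative, and the private _create_manager_agent call is used only to show the wiring without triggering an LLM request):

from crewai import Agent, Crew, Process, Task
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

facts = StringKnowledgeSource(content="Brandon's favorite color is red.")

worker = Agent(
    role="Information Agent",
    goal="Answer questions using crew knowledge",
    backstory="Relies on crew-level knowledge sources.",
)
question = Task(
    description="What is Brandon's favorite color?",
    expected_output="A color name.",
    agent=worker,
)
crew = Crew(
    agents=[worker],
    tasks=[question],
    process=Process.hierarchical,   # the manager agent is created internally
    manager_llm="gpt-4o-mini",      # illustrative model name
    knowledge_sources=[facts],
)

crew._create_manager_agent()        # invoked internally during kickoff()
# With this fix applied, the auto-created manager shares the crew's knowledge sources.
assert crew.manager_agent.knowledge_sources == [facts]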

.gitignore

@@ -22,4 +22,6 @@ crew_tasks_output.json
.ruff_cache
.venv
agentops.log
test_flow.html
test_flow.html# Test cassettes with sensitive data
tests/cassettes/*_with_knowledge_sources.yaml
tests/cassettes/*_sensitive_*.yaml
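
The two new ignore rules are plain globs; a quick sketch using Python's fnmatch (a rough stand-in for gitignore matching, with made-up cassette names) shows which files they would keep out of the repository:

from fnmatch import fnmatch

patterns = [
    "tests/cassettes/*_with_knowledge_sources.yaml",
    "tests/cassettes/*_sensitive_*.yaml",
]
candidates = [
    "tests/cassettes/test_custom_llm_with_knowledge_sources.yaml",  # ignored
    "tests/cassettes/test_agent_sensitive_payload.yaml",            # ignored
    "tests/cassettes/test_agent_execution.yaml",                    # still tracked
]
for path in candidates:
    ignored = any(fnmatch(path, pattern) for pattern in patterns)
    print(f"{path}: {'ignored' if ignored else 'tracked'}")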

src/crewai/crew.py

@@ -735,7 +735,20 @@ class Crew(BaseModel):
        self._create_manager_agent()
        return self._execute_tasks(self.tasks)

    def _create_manager_agent(self):
    def _create_manager_agent(self) -> Agent:
        """Create a manager agent for hierarchical process.

        Creates or configures a manager agent that will be responsible for delegating tasks
        to other agents in a hierarchical process. If knowledge sources are provided,
        they will be passed to the manager agent to enhance its context awareness.

        Returns:
            Agent: The configured manager agent

        Raises:
            Exception: If the manager agent has tools, which is not allowed
            ValueError: If knowledge sources are provided but not valid BaseKnowledgeSource instances
        """
        i18n = I18N(prompt_file=self.prompt_file)
        if self.manager_agent is not None:
            self.manager_agent.allow_delegation = True
@@ -748,6 +761,13 @@ class Crew(BaseModel):
                raise Exception("Manager agent should not have tools")
        else:
            self.manager_llm = create_llm(self.manager_llm)

            # Validate knowledge sources if provided
            if self.knowledge_sources and not all(
                isinstance(ks, BaseKnowledgeSource) for ks in self.knowledge_sources
            ):
                raise ValueError("All knowledge sources must be instances of BaseKnowledgeSource")

            manager = Agent(
                role=i18n.retrieve("hierarchical_manager_agent", "role"),
                goal=i18n.retrieve("hierarchical_manager_agent", "goal"),
@@ -755,10 +775,12 @@ class Crew(BaseModel):
                tools=AgentTools(agents=self.agents).tools(),
                allow_delegation=True,
                llm=self.manager_llm,
                knowledge_sources=self.knowledge_sources,
                verbose=self.verbose,
            )
            self.manager_agent = manager
        manager.crew = self
        return manager

    def _execute_tasks(
        self,
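
The ValueError documented in the new docstring comes from the guard added just before the manager Agent is constructed. A standalone rendering of that check, with made-up inputs rather than code from this PR, shows when it fires:

from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

def validate_sources(sources):
    # Same condition as the guard added in _create_manager_agent above.
    if sources and not all(isinstance(ks, BaseKnowledgeSource) for ks in sources):
        raise ValueError("All knowledge sources must be instances of BaseKnowledgeSource")

valid = [StringKnowledgeSource(content="Brandon's favorite color is red.")]
invalid = valid + ["a plain string is not a knowledge source"]

validate_sources(valid)        # passes silently
try:
    validate_sources(invalid)  # raises
except ValueError as exc:
    print(exc)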

tests/agent_test.py

@@ -13,6 +13,7 @@ from crewai.agents.parser import AgentAction, CrewAgentParser, OutputParserExcep
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.llm import LLM
from crewai.process import Process
from crewai.tools import tool
from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage
@@ -1797,3 +1798,52 @@ def test_litellm_anthropic_error_handling():
        # Verify the LLM call was only made once (no retries)
        mock_llm_call.assert_called_once()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_custom_llm_with_knowledge_sources():
    """Test that knowledge sources work with custom LLMs in hierarchical crews."""
    # Create a knowledge source with some content
    content = "Brandon's favorite color is red and he likes Mexican food."
    string_source = StringKnowledgeSource(content=content)

    # Create a custom LLM
    custom_llm = LLM(model="gpt-3.5-turbo")

    with patch(
        "crewai.knowledge.storage.knowledge_storage.KnowledgeStorage"
    ) as MockKnowledge:
        mock_knowledge_instance = MockKnowledge.return_value
        mock_knowledge_instance.sources = [string_source]
        mock_knowledge_instance.query.return_value = [{"content": content}]

        # Create an agent with the custom LLM
        agent = Agent(
            role="Information Agent",
            goal="Provide information based on knowledge sources",
            backstory="You have access to specific knowledge sources.",
            llm=custom_llm,
        )

        # Create a task that requires the agent to use the knowledge
        task = Task(
            description="What is Brandon's favorite color?",
            expected_output="Brandon's favorite color.",
            agent=agent,
        )

        # Create a crew with hierarchical process and custom LLM as manager
        crew = Crew(
            agents=[agent],
            tasks=[task],
            process=Process.hierarchical,
            manager_llm=custom_llm,
            knowledge_sources=[string_source],
        )

        with patch.object(crew, "_execute_tasks") as mock_execute_tasks:
            mock_execute_tasks.return_value.raw = "Brandon's favorite color is red."

            result = crew.kickoff()

            # Assert that the agent provides the correct information
            assert "red" in result.raw.lower()