Fix issue #2333: Pass knowledge sources to manager agent in hierarchical crews
Co-Authored-By: Joe Moura <joao@crewai.com>
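In user-facing terms, the fix means a hierarchical crew's auto-created manager agent now receives the crew-level knowledge_sources instead of being built without them. Below is a minimal sketch of the scenario this enables; it is illustrative only: the model name, role/goal/backstory strings, and knowledge content are placeholders, and it assumes valid LLM and embedding credentials (e.g. OPENAI_API_KEY) are configured, since kickoff() makes real calls.

# Sketch of the fixed behaviour (not part of this diff): crew-level knowledge
# reaching the auto-created manager agent in a hierarchical crew.
from crewai import Agent, Crew, Process, Task
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Crew-level knowledge source; the content is a placeholder.
source = StringKnowledgeSource(
    content="Brandon's favorite color is red and he likes Mexican food."
)

worker = Agent(
    role="Information Agent",
    goal="Answer questions using the crew's knowledge",
    backstory="You rely on crew-level knowledge sources.",
)

crew = Crew(
    agents=[worker],
    tasks=[
        Task(
            description="What is Brandon's favorite color?",
            expected_output="Brandon's favorite color.",
            agent=worker,
        )
    ],
    process=Process.hierarchical,
    manager_llm="gpt-4o-mini",   # illustrative model name
    knowledge_sources=[source],  # with this fix, also passed to the manager agent
)

result = crew.kickoff()
print(result.raw)

# After kickoff() the manager agent has been created; with this fix it should
# carry the same crew-level sources (assumption: manager_agent is populated here).
assert crew.manager_agent is not None
assert crew.manager_agent.knowledge_sources == [source]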
@@ -755,6 +755,7 @@ class Crew(BaseModel):
                 tools=AgentTools(agents=self.agents).tools(),
                 allow_delegation=True,
                 llm=self.manager_llm,
+                knowledge_sources=self.knowledge_sources,
                 verbose=self.verbose,
             )
             self.manager_agent = manager
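For context, this call sits in Crew's hierarchical setup, where a manager Agent is created automatically from manager_llm when no manager_agent is supplied; previously the constructor call omitted knowledge_sources, so crew-level knowledge never reached the manager that coordinates delegation.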
@@ -7,6 +7,7 @@ from unittest.mock import patch
 import pytest

 from crewai import Agent, Crew, Task
+from crewai.process import Process
 from crewai.agents.cache import CacheHandler
 from crewai.agents.crew_agent_executor import AgentFinish, CrewAgentExecutor
 from crewai.agents.parser import AgentAction, CrewAgentParser, OutputParserException
@@ -1797,3 +1798,52 @@ def test_litellm_anthropic_error_handling():

         # Verify the LLM call was only made once (no retries)
         mock_llm_call.assert_called_once()
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_custom_llm_with_knowledge_sources():
+    """Test that knowledge sources work with custom LLMs in hierarchical crews."""
+    # Create a knowledge source with some content
+    content = "Brandon's favorite color is red and he likes Mexican food."
+    string_source = StringKnowledgeSource(content=content)
+
+    # Create a custom LLM
+    custom_llm = LLM(model="gpt-3.5-turbo")
+
+    with patch(
+        "crewai.knowledge.storage.knowledge_storage.KnowledgeStorage"
+    ) as MockKnowledge:
+        mock_knowledge_instance = MockKnowledge.return_value
+        mock_knowledge_instance.sources = [string_source]
+        mock_knowledge_instance.query.return_value = [{"content": content}]
+
+        # Create an agent with the custom LLM
+        agent = Agent(
+            role="Information Agent",
+            goal="Provide information based on knowledge sources",
+            backstory="You have access to specific knowledge sources.",
+            llm=custom_llm,
+        )
+
+        # Create a task that requires the agent to use the knowledge
+        task = Task(
+            description="What is Brandon's favorite color?",
+            expected_output="Brandon's favorite color.",
+            agent=agent,
+        )
+
+        # Create a crew with hierarchical process and custom LLM as manager
+        crew = Crew(
+            agents=[agent],
+            tasks=[task],
+            process=Process.hierarchical,
+            manager_llm=custom_llm,
+            knowledge_sources=[string_source],
+        )
+
+        with patch.object(crew, "_execute_tasks") as mock_execute_tasks:
+            mock_execute_tasks.return_value.raw = "Brandon's favorite color is red."
+            result = crew.kickoff()
+
+            # Assert that the agent provides the correct information
+            assert "red" in result.raw.lower()
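The @pytest.mark.vcr marker replays the cassette added below, which is named after the test function and captures the embeddings request issued while the knowledge source is processed. The recorded response is a 401 because the API key was redacted at recording time; the test's assertions presumably do not depend on it, since both KnowledgeStorage and _execute_tasks are patched.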
tests/cassettes/test_custom_llm_with_knowledge_sources.yaml (new file, 77 lines)
@@ -0,0 +1,77 @@
+interactions:
+- request:
+    body: '{"input": ["Brandon''s favorite color is red and he likes Mexican food."],
+      "model": "text-embedding-3-small", "encoding_format": "base64"}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '137'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.61.0
+      x-stainless-arch:
+      - x64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - Linux
+      x-stainless-package-version:
+      - 1.61.0
+      x-stainless-retry-count:
+      - '0'
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.7
+    method: POST
+    uri: https://api.openai.com/v1/embeddings
+  response:
+    content: "{\n \"error\": {\n \"message\": \"Incorrect API key provided:
+      sk-proj-********************************************************************************************************************************************************sLcA.
+      You can find your API key at https://platform.openai.com/account/api-keys.\",\n
+      \ \"type\": \"invalid_request_error\",\n \"param\": null,\n \"code\":
+      \"invalid_api_key\"\n }\n}\n"
+    headers:
+      CF-RAY:
+      - 91ea5f13cd222805-SEA
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '414'
+      Content-Type:
+      - application/json; charset=utf-8
+      Date:
+      - Tue, 11 Mar 2025 10:29:33 GMT
+      Server:
+      - cloudflare
+      Set-Cookie:
+      - __cf_bm=kTdkN5AQIo9r0zvYCiKHoe2I4sc.05ujYaWUlEbBggg-1741688973-1.0.1.1-SDOx8RnzVUs6AsahjrVC7sGMQMc3OHXhXuso4pUPx0xW3HEskjyojGBWbBsgEg687GdTMNkmWPHdvIjspXErv9_fdk1NcTdMZJyPzlSFTSU;
+        path=/; expires=Tue, 11-Mar-25 10:59:33 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=wFljfHJacNcpPFKRXsyZIA0Uz3EiimAH6HAVFEqnNLE-1741688973493-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+      X-Content-Type-Options:
+      - nosniff
+      alt-svc:
+      - h3=":443"; ma=86400
+      cf-cache-status:
+      - DYNAMIC
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      vary:
+      - Origin
+      x-request-id:
+      - req_28fcdae6dfccbb341a3354d2de771194
+    http_version: HTTP/1.1
+    status_code: 401
+version: 1