Compare commits

...

6 Commits

Author    SHA1        Message                                                               Date
Devin AI  25441f3a14  Fix test mock to use 'context' key instead of 'content' key          2025-02-26 04:52:39 +00:00
Devin AI  559d64b4af  Add test cassette for task knowledge test                             2025-02-26 04:23:42 +00:00
Devin AI  0fa021dea8  Fix test mocking approach to use real LLM with mocked call method    2025-02-26 04:19:09 +00:00
Devin AI  0a0a46f972  Fix test implementation to improve reliability and prevent timeouts  2025-02-26 04:17:40 +00:00
Devin AI  8f0d85b5e7  Fix import sorting in agent_test.py                                   2025-02-26 04:05:01 +00:00
Devin AI  b524855e22  Fix issue #2233: Add support for task-specific knowledge              2025-02-26 04:03:20 +00:00

All six commits carry the trailer Co-Authored-By: Joe Moura <joao@crewai.com>.
5 changed files with 226 additions and 0 deletions
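For context, here is a minimal usage sketch of the feature this change set adds: attaching knowledge directly to a Task. The Knowledge constructor arguments (collection_name, sources) are assumptions carried over from the agent-level knowledge API; the diffs below only show that Task now accepts a Knowledge instance via its new knowledge field.

from crewai import Agent, Task
from crewai.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Task-specific knowledge: only this task sees these facts.
task_knowledge = Knowledge(
    collection_name="france_facts",  # assumed parameter name
    sources=[StringKnowledgeSource(content="The capital of France is Paris.")],
)

agent = Agent(
    role="Geography Teacher",
    goal="Provide accurate geographic information",
    backstory="You teach students about world capitals.",
)

task = Task(
    description="What is the capital of France?",
    expected_output="The capital of France.",
    agent=agent,
    knowledge=task_knowledge,  # the new field added by this change
)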

View File

@@ -1,3 +1,4 @@
import logging
import re
import shutil
import subprocess

@@ -5,6 +6,8 @@ from typing import Any, Dict, List, Literal, Optional, Sequence, Union
from pydantic import Field, InstanceOf, PrivateAttr, model_validator

logger = logging.getLogger(__name__)

from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor

@@ -207,6 +210,27 @@ class Agent(BaseAgent):
            if memory.strip() != "":
                task_prompt += self.i18n.slice("memory").format(memory=memory)

        # Check if the task has knowledge first.
        # Knowledge is queried in the following priority order:
        #   1. Task-specific knowledge
        #   2. Agent's knowledge
        #   3. Crew's knowledge
        # This ensures the most specific context is considered first.
        if hasattr(task, 'knowledge') and task.knowledge:
            try:
                task_knowledge_snippets = task.knowledge.query([task.prompt()])
                if task_knowledge_snippets:
                    task_knowledge_context = extract_knowledge_context(
                        task_knowledge_snippets
                    )
                    if task_knowledge_context:
                        task_prompt += task_knowledge_context
            except Exception as e:
                logger.warning(f"Error querying task knowledge: {str(e)}")

        # Then check agent's knowledge
        if self.knowledge:
            agent_knowledge_snippets = self.knowledge.query([task.prompt()])
            if agent_knowledge_snippets:

@@ -216,6 +240,7 @@ class Agent(BaseAgent):
                if agent_knowledge_context:
                    task_prompt += agent_knowledge_context

        # Finally check crew's knowledge
        if self.crew:
            knowledge_snippets = self.crew.query_knowledge([task.prompt()])
            if knowledge_snippets:
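The hunk above calls a helper, extract_knowledge_context, whose implementation is not part of this diff. Judging by the test mock further down, which returns snippets shaped like [{"context": ...}], a plausible sketch of its behavior might look like this (an assumption, not the actual implementation):

from typing import Any, Dict, List, Optional

def extract_knowledge_context(snippets: List[Dict[str, Any]]) -> Optional[str]:
    # Collect the "context" value from each snippet and join them into a
    # single block that can be appended to the task prompt.
    # (The prefix and formatting here are illustrative only.)
    contexts = [s["context"] for s in snippets if s.get("context")]
    if not contexts:
        return None
    return "Additional Information: " + "\n".join(contexts)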

View File

@@ -0,0 +1,7 @@
"""
Knowledge management module for CrewAI.
Provides functionality for managing and querying knowledge sources.
"""
from crewai.knowledge.knowledge import Knowledge
__all__ = ["Knowledge"]
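This new package initializer re-exports Knowledge, so the short import path used elsewhere in the diff resolves to the same class as the full module path:

from crewai.knowledge import Knowledge as ShortPath
from crewai.knowledge.knowledge import Knowledge as ModulePath

# Both names refer to the same class object.
assert ShortPath is ModulePath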

View File

@@ -32,6 +32,7 @@ from pydantic import (
from pydantic_core import PydanticCustomError

from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.knowledge import Knowledge
from crewai.tasks.guardrail_result import GuardrailResult
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput

@@ -144,6 +145,10 @@ class Task(BaseModel):
    end_time: Optional[datetime.datetime] = Field(
        default=None, description="End time of the task execution"
    )
    knowledge: Optional[Knowledge] = Field(
        default=None,
        description="Knowledge sources for the task. This knowledge will be used by the agent when executing the task.",
    )

    @field_validator("guardrail")
    @classmethod

@@ -215,6 +220,24 @@ class Task(BaseModel):
                "may_not_set_field", "This field is not to be set by the user.", {}
            )

    @field_validator("knowledge")
    @classmethod
    def validate_knowledge(cls, knowledge):
        """Validate that the knowledge field is an instance of the Knowledge class.

        Args:
            knowledge: The knowledge to validate. Can be None or an instance of Knowledge.

        Returns:
            The validated knowledge object, or None if no knowledge was provided.

        Raises:
            ValueError: If the knowledge is not an instance of the Knowledge class.
        """
        if knowledge is not None and not isinstance(knowledge, Knowledge):
            raise ValueError("Knowledge must be an instance of Knowledge class")
        return knowledge

    @field_validator("output_file")
    @classmethod
    def output_file_validation(cls, value: Optional[str]) -> Optional[str]:
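To illustrate the new validator: constructing a Task with anything other than a Knowledge instance (or None) in the knowledge field should fail validation. A hypothetical snippet:

import pytest
from pydantic import ValidationError

from crewai import Agent, Task

agent = Agent(role="Teacher", goal="Teach", backstory="An educator.")

# A plain string is not a Knowledge instance, so pydantic raises a
# ValidationError (from its own type check or from the validator's ValueError).
with pytest.raises(ValidationError):
    Task(
        description="What is the capital of France?",
        expected_output="The capital of France.",
        agent=agent,
        knowledge="not a Knowledge object",
    )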

View File

@@ -10,6 +10,7 @@ from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
from crewai.agents.crew_agent_executor import AgentFinish, CrewAgentExecutor
from crewai.agents.parser import AgentAction, CrewAgentParser, OutputParserException
from crewai.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.llm import LLM

@@ -1661,6 +1662,99 @@ def test_agent_with_knowledge_sources_works_with_copy():
    assert isinstance(agent_copy.llm, LLM)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_uses_task_knowledge():
    """Test that an agent uses the knowledge provided in the task."""
    # Content that the task-specific knowledge should surface
    content = "The capital of France is Paris. The Eiffel Tower is located in Paris."

    # Create a mock Knowledge object
    with patch("crewai.knowledge.Knowledge", autospec=True) as MockKnowledge:
        try:
            # Configure the mock
            mock_knowledge = MockKnowledge.return_value
            mock_knowledge.query.return_value = [{"context": content}]

            # Create a real LLM; its call method is patched below
            agent = Agent(
                role="Geography Teacher",
                goal="Provide accurate geographic information",
                backstory="You are a geography expert who teaches students about world capitals.",
                llm=LLM(model="gpt-4o-mini"),
            )

            # Create a task with knowledge
            task = Task(
                description="What is the capital of France?",
                expected_output="The capital of France.",
                agent=agent,
                knowledge=mock_knowledge,
            )

            # Mock the LLM's call method to avoid actual LLM calls
            with patch.object(agent.llm, "call") as mock_llm_call:
                mock_llm_call.return_value = (
                    "The capital of France is Paris, where the Eiffel Tower is located."
                )

                # Execute the task
                result = agent.execute_task(task)

                # Assert that the agent provides the correct information
                assert "paris" in result.lower()
                assert "eiffel tower" in result.lower()

                # Verify that the task's knowledge was queried
                mock_knowledge.query.assert_called_once()

                # The query should include the task prompt
                query_arg = mock_knowledge.query.call_args[0][0]
                assert isinstance(query_arg, list)
                assert "capital of france" in query_arg[0].lower()
        finally:
            MockKnowledge.reset_mock()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_empty_task_knowledge():
    """Test that an agent handles empty task knowledge gracefully."""
    # Create a mock Knowledge object
    with patch("crewai.knowledge.Knowledge", autospec=True) as MockKnowledge:
        try:
            # Configure the mock to return empty results
            mock_knowledge = MockKnowledge.return_value
            mock_knowledge.query.return_value = []

            # Create a real LLM; its call method is patched below
            agent = Agent(
                role="Geography Teacher",
                goal="Provide accurate geographic information",
                backstory="You are a geography expert who teaches students about world capitals.",
                llm=LLM(model="gpt-4o-mini"),
            )

            # Create a task with empty knowledge
            task = Task(
                description="What is the capital of France?",
                expected_output="The capital of France.",
                agent=agent,
                knowledge=mock_knowledge,
            )

            # Mock the LLM's call method to avoid actual LLM calls
            with patch.object(agent.llm, "call") as mock_llm_call:
                mock_llm_call.return_value = "The capital of France is Paris."

                # Execute the task
                result = agent.execute_task(task)

                # Assert that the agent still provides a response
                assert "paris" in result.lower()

                # Verify that the task's knowledge was queried
                mock_knowledge.query.assert_called_once()
        finally:
            MockKnowledge.reset_mock()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_litellm_auth_error_handling():
    """Test that LiteLLM authentication errors are handled correctly and not retried."""

View File

@@ -0,0 +1,77 @@
interactions:
- request:
    body: '{"input": ["The capital of France is Paris. The Eiffel Tower is located
      in Paris."], "model": "text-embedding-3-small", "encoding_format": "base64"}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '148'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.61.0
      x-stainless-arch:
      - x64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Linux
      x-stainless-package-version:
      - 1.61.0
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/embeddings
  response:
    content: "{\n  \"error\": {\n    \"message\": \"Incorrect API key provided:
      sk-proj-********************************************************************************************************************************************************sLcA.
      You can find your API key at https://platform.openai.com/account/api-keys.\",\n
      \   \"type\": \"invalid_request_error\",\n    \"param\": null,\n    \"code\":
      \"invalid_api_key\"\n  }\n}\n"
    headers:
      CF-RAY:
      - 917d03088faba9ba-SEA
      Connection:
      - keep-alive
      Content-Length:
      - '414'
      Content-Type:
      - application/json; charset=utf-8
      Date:
      - Wed, 26 Feb 2025 03:57:35 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=HYYT2XR.PKWNdkkixpkZ1Ng9oco3jg0qXKeCMokrXpg-1740542255-1.0.1.1-R8kmsL1ridzwjB7hh8WsXLH0.cFmLphr4rrzvoa34aVcYrHEMLhLjQMNEFk3785VyTO20pOk1s_XPYcrzP3IvQ;
        path=/; expires=Wed, 26-Feb-25 04:27:35 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=sI_WkX3WMX8K_QEgvBkUG5bNf_iSQm5w662QY8C.4HY-1740542255564-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      X-Content-Type-Options:
      - nosniff
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      vary:
      - Origin
      x-request-id:
      - req_f655c752d31b6c29cd52118e474f5a48
    http_version: HTTP/1.1
    status_code: 401
version: 1