Compare commits


2 Commits

Devin AI
264e2b01fd Address lint failures and improve exception handling
- Remove unused imports (os from rag_storage.py, pytest from test_memory_fallback.py)
- Add specific exception handling in fallback mechanism (ConnectionError, ImportError, ValueError)
- Add comprehensive logging to track embedding provider selection and fallback attempts
- Resolves CI lint failures and addresses PR review feedback

Co-Authored-By: João <joao@crewai.com>
2025-06-03 18:43:01 +00:00

Devin AI
faddb7dca2 Fix ValidationError when using memory=True without OpenAI API key
- Add fallback embedding providers in EmbeddingConfigurator
- Modify RAGStorage and KnowledgeStorage to use fallback mechanism
- Add comprehensive tests for memory functionality without OpenAI API key
- Resolves issue #2943 by allowing memory=True with alternative embedding providers

Fallback hierarchy: OpenAI -> Ollama -> HuggingFace -> SentenceTransformers

Co-Authored-By: João <joao@crewai.com>
2025-06-03 18:36:58 +00:00
18 changed files with 232 additions and 83 deletions
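
The net effect of these two commits: `Crew(memory=True)` no longer requires `OPENAI_API_KEY`; `EmbeddingConfigurator.create_default_embedding_with_fallback()` walks the OpenAI -> Ollama -> HuggingFace -> SentenceTransformers chain instead of raising a ValidationError (issue #2943). Below is a minimal usage sketch of that behavior, mirroring the new tests further down in this diff; the agent/task values are placeholders, and the fallback assumes a reachable local Ollama server or an installed sentence-transformers package.

```python
import os
from crewai import Agent, Task, Crew, Process

# No OpenAI key in the environment: memory storage should fall back to
# Ollama, then HuggingFace, then local SentenceTransformers embeddings.
os.environ.pop("OPENAI_API_KEY", None)

agent = Agent(
    role="Researcher",  # placeholder values, not from the PR
    goal="Answer questions from memory",
    backstory="A minimal test agent",
)
task = Task(
    description="Say hello",
    expected_output="A greeting",
    agent=agent,
)

# Before these commits this raised a ValidationError (issue #2943);
# with the fallback in place the crew builds its memory stores anyway.
crew = Crew(agents=[agent], tasks=[task], process=Process.sequential, memory=True)
print(crew._short_term_memory is not None)  # True
```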


@@ -161,7 +161,7 @@ To get started with CrewAI, follow these simple steps:
### 1. Installation
-Ensure you have Python >=3.10 <3.14 installed on your system. CrewAI uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
+Ensure you have Python >=3.10 <3.13 installed on your system. CrewAI uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
First, install CrewAI:


@@ -200,7 +200,6 @@ class Agent(BaseAgent):
collection_name=self.role,
storage=self.knowledge_storage or None,
)
-self.knowledge.add_sources()
except (TypeError, ValueError) as e:
raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
@@ -244,28 +243,21 @@ class Agent(BaseAgent):
"""
if self.reasoning:
try:
-from crewai.utilities.reasoning_handler import (
-AgentReasoning,
-AgentReasoningOutput,
-)
+from crewai.utilities.reasoning_handler import AgentReasoning, AgentReasoningOutput
reasoning_handler = AgentReasoning(task=task, agent=self)
-reasoning_output: AgentReasoningOutput = (
-reasoning_handler.handle_agent_reasoning()
-)
+reasoning_output: AgentReasoningOutput = reasoning_handler.handle_agent_reasoning()
# Add the reasoning plan to the task description
task.description += f"\n\nReasoning Plan:\n{reasoning_output.plan.plan}"
except Exception as e:
-if hasattr(self, "_logger"):
-self._logger.log(
-"error", f"Error during reasoning process: {str(e)}"
-)
+if hasattr(self, '_logger'):
+self._logger.log("error", f"Error during reasoning process: {str(e)}")
else:
print(f"Error during reasoning process: {str(e)}")
self._inject_date_to_task(task)
if self.tools_handler:
self.tools_handler.last_used_tool = {} # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling")
@@ -630,33 +622,22 @@ class Agent(BaseAgent):
"""Inject the current date into the task description if inject_date is enabled."""
if self.inject_date:
from datetime import datetime
try:
-valid_format_codes = [
-"%Y",
-"%m",
-"%d",
-"%H",
-"%M",
-"%S",
-"%B",
-"%b",
-"%A",
-"%a",
-]
+valid_format_codes = ['%Y', '%m', '%d', '%H', '%M', '%S', '%B', '%b', '%A', '%a']
is_valid = any(code in self.date_format for code in valid_format_codes)
if not is_valid:
raise ValueError(f"Invalid date format: {self.date_format}")
current_date: str = datetime.now().strftime(self.date_format)
task.description += f"\n\nCurrent Date: {current_date}"
except Exception as e:
-if hasattr(self, "_logger"):
+if hasattr(self, '_logger'):
self._logger.log("warning", f"Failed to inject date: {str(e)}")
else:
print(f"Warning: Failed to inject date: {str(e)}")
def _validate_docker_installation(self) -> None:
"""Check if Docker is installed and running."""
if not shutil.which("docker"):


@@ -4,7 +4,7 @@ Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.co
## Installation
-Ensure you have Python >=3.10 <3.14 installed on your system. This project uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
+Ensure you have Python >=3.10 <3.13 installed on your system. This project uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
First, if you haven't already, install uv:


@@ -3,7 +3,7 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
-requires-python = ">=3.10,<3.14"
+requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.121.1,<1.0.0"
]


@@ -4,7 +4,7 @@ Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.co
## Installation
-Ensure you have Python >=3.10 <3.14 installed on your system. This project uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
+Ensure you have Python >=3.10 <3.13 installed on your system. This project uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
First, if you haven't already, install uv:


@@ -3,7 +3,7 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
-requires-python = ">=3.10,<3.14"
+requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.121.1,<1.0.0",
]


@@ -5,7 +5,7 @@ custom tools to power up your crews.
## Installing
-Ensure you have Python >=3.10 <3.14 installed on your system. This project
+Ensure you have Python >=3.10 <3.13 installed on your system. This project
uses [UV](https://docs.astral.sh/uv/) for dependency management and package
handling, offering a seamless setup and execution experience.


@@ -3,7 +3,7 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "Power up your crews with {{folder_name}}"
readme = "README.md"
-requires-python = ">=3.10,<3.14"
+requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.121.1"
]


@@ -89,7 +89,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
if available_exports:
console.print(
f"[green]Found these tools to publish: {', '.join([e['name'] for e in available_exports])}[/green]"
f"[green]Found these tools to publish: {', '.join(available_exports)}[/green]"
)
with tempfile.TemporaryDirectory() as temp_build_dir:


@@ -181,13 +181,10 @@ class KnowledgeStorage(BaseKnowledgeStorage):
raise
def _create_default_embedding_function(self):
-from chromadb.utils.embedding_functions.openai_embedding_function import (
-OpenAIEmbeddingFunction,
-)
-return OpenAIEmbeddingFunction(
-api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
-)
+from crewai.utilities.embedding_configurator import EmbeddingConfigurator
+configurator = EmbeddingConfigurator()
+return configurator.create_default_embedding_with_fallback()
def _set_embedder_config(self, embedder: Optional[Dict[str, Any]] = None) -> None:
"""Set the embedding configuration for the knowledge storage.


@@ -1,7 +1,6 @@
import contextlib
import io
import logging
-import os
import shutil
import uuid
from typing import Any, Dict, List, Optional
@@ -57,7 +56,10 @@ class RAGStorage(BaseRAGStorage):
def _set_embedder_config(self):
configurator = EmbeddingConfigurator()
-self.embedder_config = configurator.configure_embedder(self.embedder_config)
+if self.embedder_config:
+self.embedder_config = configurator.configure_embedder(self.embedder_config)
+else:
+self.embedder_config = configurator.create_default_embedding_with_fallback()
def _initialize_app(self):
import chromadb
@@ -165,10 +167,7 @@ class RAGStorage(BaseRAGStorage):
)
def _create_default_embedding_function(self):
-from chromadb.utils.embedding_functions.openai_embedding_function import (
-OpenAIEmbeddingFunction,
-)
-return OpenAIEmbeddingFunction(
-api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
-)
+from crewai.utilities.embedding_configurator import EmbeddingConfigurator
+configurator = EmbeddingConfigurator()
+return configurator.create_default_embedding_with_fallback()


@@ -464,7 +464,7 @@ def load_agent_from_repository(from_repository: str) -> Dict[str, Any]:
attributes[key] = []
for tool in value:
try:
-module = importlib.import_module(tool["module"])
+module = importlib.import_module("crewai_tools")
tool_class = getattr(module, tool["name"])
attributes[key].append(tool_class())
except Exception as e:


@@ -55,6 +55,40 @@ class EmbeddingConfigurator:
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
)
def create_default_embedding_with_fallback(self) -> EmbeddingFunction:
"""Create an embedding function with fallback providers when OpenAI API key is not available."""
import logging
logger = logging.getLogger(__name__)
if os.getenv("OPENAI_API_KEY"):
logger.info("Using OpenAI embeddings")
return self._create_default_embedding_function()
logger.warning("OpenAI API key not found, attempting fallback providers")
try:
logger.info("Attempting Ollama embedding provider")
return self.configure_embedder({
"provider": "ollama",
"config": {"url": "http://localhost:11434/api/embeddings"},
"model": "nomic-embed-text"
})
except (ConnectionError, ImportError, ValueError) as e:
logger.warning(f"Ollama fallback failed: {str(e)}, trying HuggingFace")
try:
logger.info("Attempting HuggingFace embedding provider")
return self.configure_embedder({
"provider": "huggingface",
"config": {"api_url": "https://api-inference.huggingface.co/pipeline/feature-extraction/sentence-transformers/all-MiniLM-L6-v2"}
})
except (ConnectionError, ImportError, ValueError) as e:
logger.warning(f"HuggingFace fallback failed: {str(e)}, using local SentenceTransformers")
from chromadb.utils.embedding_functions.sentence_transformer_embedding_function import (
SentenceTransformerEmbeddingFunction,
)
logger.info("Using local SentenceTransformers embedding provider")
return SentenceTransformerEmbeddingFunction(model_name="all-MiniLM-L6-v2")
@staticmethod
def _configure_openai(config, model_name):
from chromadb.utils.embedding_functions.openai_embedding_function import (


@@ -309,9 +309,7 @@ def test_cache_hitting():
def handle_tool_end(source, event):
received_events.append(event)
-with (
-patch.object(CacheHandler, "read") as read,
-):
+with (patch.object(CacheHandler, "read") as read,):
read.return_value = "0"
task = Task(
description="What is 2 times 6? Ignore correctness and just return the result of the multiplication tool, you must use the tool.",
@@ -1630,13 +1628,13 @@ def test_agent_execute_task_with_ollama():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_knowledge_sources():
# Create a knowledge source with some content
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
with patch("crewai.knowledge") as MockKnowledge:
mock_knowledge_instance = MockKnowledge.return_value
mock_knowledge_instance.sources = [string_source]
mock_knowledge_instance.search.return_value = [{"content": content}]
MockKnowledge.add_sources.return_value = [string_source]
agent = Agent(
role="Information Agent",
@@ -1646,6 +1644,7 @@ def test_agent_with_knowledge_sources():
knowledge_sources=[string_source],
)
+# Create a task that requires the agent to use the knowledge
task = Task(
description="What is Brandon's favorite color?",
expected_output="Brandon's favorite color.",
@@ -1653,11 +1652,10 @@ def test_agent_with_knowledge_sources():
)
crew = Crew(agents=[agent], tasks=[task])
-with patch.object(Knowledge, "add_sources") as mock_add_sources:
-result = crew.kickoff()
-assert mock_add_sources.called, "add_sources() should have been called"
-mock_add_sources.assert_called_once()
-assert "red" in result.raw.lower()
+result = crew.kickoff()
+# Assert that the agent provides the correct information
+assert "red" in result.raw.lower()
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -2038,7 +2036,7 @@ def mock_get_auth_token():
@patch("crewai.cli.plus_api.PlusAPI.get_agent")
def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
-from crewai_tools import SerperDevTool, XMLSearchTool
+from crewai_tools import SerperDevTool
mock_get_response = MagicMock()
mock_get_response.status_code = 200
@@ -2046,10 +2044,7 @@ def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
"role": "test role",
"goal": "test goal",
"backstory": "test backstory",
"tools": [
{"module": "crewai_tools", "name": "SerperDevTool"},
{"module": "crewai_tools", "name": "XMLSearchTool"},
],
"tools": [{"name": "SerperDevTool"}],
}
mock_get_agent.return_value = mock_get_response
agent = Agent(from_repository="test_agent")
@@ -2057,9 +2052,8 @@ def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
assert agent.role == "test role"
assert agent.goal == "test goal"
assert agent.backstory == "test backstory"
-assert len(agent.tools) == 2
+assert len(agent.tools) == 1
assert isinstance(agent.tools[0], SerperDevTool)
-assert isinstance(agent.tools[1], XMLSearchTool)
@patch("crewai.cli.plus_api.PlusAPI.get_agent")
@@ -2072,7 +2066,7 @@ def test_agent_from_repository_override_attributes(mock_get_agent, mock_get_auth
"role": "test role",
"goal": "test goal",
"backstory": "test backstory",
"tools": [{"name": "SerperDevTool", "module": "crewai_tools"}],
"tools": [{"name": "SerperDevTool"}],
}
mock_get_agent.return_value = mock_get_response
agent = Agent(from_repository="test_agent", role="Custom Role")
@@ -2092,7 +2086,7 @@ def test_agent_from_repository_with_invalid_tools(mock_get_agent, mock_get_auth_
"role": "test role",
"goal": "test goal",
"backstory": "test backstory",
"tools": [{"name": "DoesNotExist", "module": "crewai_tools",}],
"tools": [{"name": "DoesNotExist"}],
}
mock_get_agent.return_value = mock_get_response
with pytest.raises(


@@ -231,7 +231,7 @@ class TestDeployCommand(unittest.TestCase):
[project]
name = "test_project"
version = "0.1.0"
-requires-python = ">=3.10,<3.14"
+requires-python = ">=3.10,<3.13"
dependencies = ["crewai"]
""",
)
@@ -250,7 +250,7 @@ class TestDeployCommand(unittest.TestCase):
[project]
name = "test_project"
version = "0.1.0"
-requires-python = ">=3.10,<3.14"
+requires-python = ">=3.10,<3.13"
dependencies = ["crewai"]
""",
)


@@ -165,7 +165,7 @@ def test_publish_when_not_in_sync(mock_is_synced, capsys, tool_command):
)
@patch("crewai.cli.plus_api.PlusAPI.publish_tool")
@patch("crewai.cli.tools.main.git.Repository.is_synced", return_value=False)
@patch("crewai.cli.tools.main.extract_available_exports", return_value=[{"name": "SampleTool"}])
@patch("crewai.cli.tools.main.extract_available_exports", return_value=["SampleTool"])
def test_publish_when_not_in_sync_and_force(
mock_available_exports,
mock_is_synced,
@@ -200,7 +200,7 @@ def test_publish_when_not_in_sync_and_force(
version="1.0.0",
description="A sample tool",
encoded_file=unittest.mock.ANY,
-available_exports=[{"name": "SampleTool"}],
+available_exports=["SampleTool"],
)
@@ -216,7 +216,7 @@ def test_publish_when_not_in_sync_and_force(
)
@patch("crewai.cli.plus_api.PlusAPI.publish_tool")
@patch("crewai.cli.tools.main.git.Repository.is_synced", return_value=True)
@patch("crewai.cli.tools.main.extract_available_exports", return_value=[{"name": "SampleTool"}])
@patch("crewai.cli.tools.main.extract_available_exports", return_value=["SampleTool"])
def test_publish_success(
mock_available_exports,
mock_is_synced,
@@ -251,7 +251,7 @@ def test_publish_success(
version="1.0.0",
description="A sample tool",
encoded_file=unittest.mock.ANY,
-available_exports=[{"name": "SampleTool"}],
+available_exports=["SampleTool"],
)
@@ -266,7 +266,7 @@ def test_publish_success(
read_data=b"sample tarball content",
)
@patch("crewai.cli.plus_api.PlusAPI.publish_tool")
@patch("crewai.cli.tools.main.extract_available_exports", return_value=[{"name": "SampleTool"}])
@patch("crewai.cli.tools.main.extract_available_exports", return_value=["SampleTool"])
def test_publish_failure(
mock_available_exports,
mock_publish,
@@ -304,7 +304,7 @@ def test_publish_failure(
read_data=b"sample tarball content",
)
@patch("crewai.cli.plus_api.PlusAPI.publish_tool")
@patch("crewai.cli.tools.main.extract_available_exports", return_value=[{"name": "SampleTool"}])
@patch("crewai.cli.tools.main.extract_available_exports", return_value=["SampleTool"])
def test_publish_api_error(
mock_available_exports,
mock_publish,


@@ -110,6 +110,40 @@ def test_crew_config_conditional_requirement():
with pytest.raises(ValueError):
Crew(process=Process.sequential)
def test_crew_creation_with_memory_true_no_openai_key():
"""Test that crew can be created with memory=True when no OpenAI API key is available."""
import os
from unittest.mock import patch
with patch.dict(os.environ, {}, clear=True):
if 'OPENAI_API_KEY' in os.environ:
del os.environ['OPENAI_API_KEY']
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory"
)
task = Task(
description="Test task",
expected_output="Test output",
agent=agent
)
crew = Crew(
agents=[agent],
tasks=[task],
process=Process.sequential,
memory=True
)
assert crew.memory is True
assert crew._short_term_memory is not None
assert crew._entity_memory is not None
assert crew._long_term_memory is not None
config = json.dumps(
{
"agents": [


@@ -0,0 +1,110 @@
import os
from unittest.mock import patch
from crewai import Agent, Task, Crew, Process
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.utilities.embedding_configurator import EmbeddingConfigurator
def test_crew_creation_with_memory_true_no_openai_key():
"""Test that crew can be created with memory=True when no OpenAI API key is available."""
with patch.dict(os.environ, {}, clear=True):
if 'OPENAI_API_KEY' in os.environ:
del os.environ['OPENAI_API_KEY']
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory"
)
task = Task(
description="Test task",
expected_output="Test output",
agent=agent
)
crew = Crew(
agents=[agent],
tasks=[task],
process=Process.sequential,
memory=True
)
assert crew.memory is True
assert crew._short_term_memory is not None
assert crew._entity_memory is not None
assert crew._long_term_memory is not None
def test_short_term_memory_initialization_without_openai():
"""Test that ShortTermMemory can be initialized without OpenAI API key."""
with patch.dict(os.environ, {}, clear=True):
if 'OPENAI_API_KEY' in os.environ:
del os.environ['OPENAI_API_KEY']
memory = ShortTermMemory()
assert memory is not None
assert memory.storage is not None
def test_entity_memory_initialization_without_openai():
"""Test that EntityMemory can be initialized without OpenAI API key."""
with patch.dict(os.environ, {}, clear=True):
if 'OPENAI_API_KEY' in os.environ:
del os.environ['OPENAI_API_KEY']
memory = EntityMemory()
assert memory is not None
assert memory.storage is not None
def test_embedding_configurator_fallback():
"""Test that EmbeddingConfigurator provides fallback when OpenAI API key is not available."""
with patch.dict(os.environ, {}, clear=True):
if 'OPENAI_API_KEY' in os.environ:
del os.environ['OPENAI_API_KEY']
configurator = EmbeddingConfigurator()
embedding_function = configurator.create_default_embedding_with_fallback()
assert embedding_function is not None
def test_embedding_configurator_uses_openai_when_available():
"""Test that EmbeddingConfigurator uses OpenAI when API key is available."""
with patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}):
configurator = EmbeddingConfigurator()
embedding_function = configurator.create_default_embedding_with_fallback()
assert embedding_function is not None
assert hasattr(embedding_function, '_api_key')
def test_crew_memory_functionality_without_openai():
"""Test that crew memory functionality works without OpenAI API key."""
with patch.dict(os.environ, {}, clear=True):
if 'OPENAI_API_KEY' in os.environ:
del os.environ['OPENAI_API_KEY']
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory"
)
task = Task(
description="Test task",
expected_output="Test output",
agent=agent
)
crew = Crew(
agents=[agent],
tasks=[task],
process=Process.sequential,
memory=True
)
crew._short_term_memory.save("test data", {"test": "metadata"})
results = crew._short_term_memory.search("test")
assert isinstance(results, list)