Compare commits

..

8 Commits

Author SHA1 Message Date
Devin AI
3129e7a4bc fix: Update I18N mocking strategy to use constructor mock
- Replace @patch('load_prompts') with @patch('I18N') decorator
- Mock I18N constructor to return MagicMock instance
- Prevent 'Prompt file None not found' errors during Agent instantiation
- Follow same mocking pattern as other tests in codebase

Co-Authored-By: João <joao@crewai.com>
2025-06-18 11:37:24 +00:00
Devin AI
469ddea415 fix: Update I18N mocking strategy for Docker validation tests
- Replace @patch decorator with module-level load_prompts mocking
- Prevent 'Prompt file None not found' errors during Agent instantiation
- Ensure tests are isolated and don't require external prompt files

Co-Authored-By: João <joao@crewai.com>
2025-06-18 11:28:45 +00:00
Devin AI
968d0a0e2c fix: Add proper I18N mocking to Docker validation tests
- Mock I18N initialization to prevent 'Prompt file None not found' errors
- Follow existing test patterns for Agent dependency mocking
- Ensure tests are isolated and don't require external files

Co-Authored-By: João <joao@crewai.com>
2025-06-18 11:17:09 +00:00
Devin AI
6938ca5a33 fix: Docker validation in container environments
- Add CREWAI_SKIP_DOCKER_VALIDATION environment variable
- Detect container environments and skip Docker validation
- Improve error messages with alternative solutions
- Add comprehensive tests for Docker validation scenarios
- Maintain backward compatibility

Fixes #3028

Co-Authored-By: João <joao@crewai.com>
2025-06-18 11:09:11 +00:00
Lucas Gomide
db1e9e9b9a fix: fix pydantic support to 2.7.x (#3016)
Some checks failed
Notify Downstream / notify-downstream (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
Pydantic 2.7.x does not support a second parameter in model validators with mode="after"
2025-06-16 16:20:10 -04:00
Lucas Gomide
d92382b6cf fix: SSL error while getting LLM data from GH (#3014)
Some checks failed
Notify Downstream / notify-downstream (push) Has been cancelled
When running behind cloud-based security tools, users are struggling to download LLM data from GitHub. Usually the following error is raised:

```
SSL certificate verification failed: HTTPSConnectionPool(host='raw.githubusercontent.com', port=443): Max retries exceeded with url: /BerriAI/litellm/main/model_prices_and_context_window.json (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1010)')))
Current CA bundle path: /usr/local/etc///.pem
```

This commit ensures the SSL config is being provided while requesting data.
2025-06-16 11:34:04 -04:00
Lucas Gomide
7c8f2a1325 docs: add missing docs about LLMGuardrail events (#3013) 2025-06-16 11:05:36 -04:00
Vidit Ostwal
a40447df29 updated docs (#2989)
Co-authored-by: Lucas Gomide <lucaslg200@gmail.com>
2025-06-16 10:49:27 -04:00
6 changed files with 252 additions and 139 deletions

View File

@@ -295,6 +295,11 @@ multimodal_agent = Agent(
- `"safe"`: Uses Docker (recommended for production)
- `"unsafe"`: Direct execution (use only in trusted environments)
<Note>
This runs a default Docker image. If you want to configure the Docker image, check out the Code Interpreter Tool in the tools section.
Add the Code Interpreter Tool to the agent via its `tools` parameter.
</Note>
#### Advanced Features
- `multimodal`: Enable multimodal capabilities for processing text and visual content
- `reasoning`: Enable agent to reflect and create plans before executing tasks

View File

@@ -233,6 +233,11 @@ CrewAI provides a wide range of events that you can listen for:
- **KnowledgeQueryFailedEvent**: Emitted when a knowledge query fails
- **KnowledgeSearchQueryFailedEvent**: Emitted when a knowledge search query fails
### LLM Guardrail Events
- **LLMGuardrailStartedEvent**: Emitted when a guardrail validation starts. Contains details about the guardrail being applied and retry count.
- **LLMGuardrailCompletedEvent**: Emitted when a guardrail validation completes. Contains details about validation success/failure, results, and error messages if any.
### Flow Events
- **FlowCreatedEvent**: Emitted when a Flow is created

View File

@@ -1,6 +1,18 @@
import os
import shutil
import subprocess
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Type, Union
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -157,7 +169,7 @@ class Agent(BaseAgent):
)
guardrail: Optional[Union[Callable[[Any], Tuple[bool, Any]], str]] = Field(
default=None,
description="Function or string description of a guardrail to validate agent output"
description="Function or string description of a guardrail to validate agent output",
)
guardrail_max_retries: int = Field(
default=3, description="Maximum number of retries when guardrail fails"
@@ -665,10 +677,26 @@ class Agent(BaseAgent):
print(f"Warning: Failed to inject date: {str(e)}")
def _validate_docker_installation(self) -> None:
"""Check if Docker is installed and running."""
"""Check if Docker is installed and running, with container environment support."""
if os.getenv("CREWAI_SKIP_DOCKER_VALIDATION", "false").lower() == "true":
return
if self.code_execution_mode == "unsafe":
return
if self._is_running_in_container():
if hasattr(self, "_logger"):
self._logger.log(
"warning",
f"Running inside container - skipping Docker validation for agent: {self.role}. "
f"Set CREWAI_SKIP_DOCKER_VALIDATION=true to suppress this warning.",
)
return
if not shutil.which("docker"):
raise RuntimeError(
f"Docker is not installed. Please install Docker to use code execution with agent: {self.role}"
f"Docker is not installed. Please install Docker to use code execution with agent: {self.role}. "
f"Alternatively, set code_execution_mode='unsafe' or CREWAI_SKIP_DOCKER_VALIDATION=true."
)
try:
@@ -680,9 +708,32 @@ class Agent(BaseAgent):
)
except subprocess.CalledProcessError:
raise RuntimeError(
f"Docker is not running. Please start Docker to use code execution with agent: {self.role}"
f"Docker is not running. Please start Docker to use code execution with agent: {self.role}. "
f"Alternatively, set code_execution_mode='unsafe' or CREWAI_SKIP_DOCKER_VALIDATION=true."
)
def _is_running_in_container(self) -> bool:
"""Detect if the current process is running inside a container."""
if os.path.exists("/.dockerenv"):
return True
try:
with open("/proc/1/cgroup", "r") as f:
content = f.read()
if (
"docker" in content
or "container" in content
or "kubepods" in content
):
return True
except (FileNotFoundError, PermissionError):
pass
if os.getpid() == 1:
return True
return False
def __repr__(self):
return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"

View File

@@ -1,3 +1,5 @@
import os
import certifi
import json
import time
from collections import defaultdict
@@ -163,8 +165,10 @@ def fetch_provider_data(cache_file):
Returns:
- dict or None: The fetched provider data or None if the operation fails.
"""
ssl_config = os.environ['SSL_CERT_FILE'] = certifi.where()
try:
response = requests.get(JSON_URL, stream=True, timeout=60)
response = requests.get(JSON_URL, stream=True, timeout=60, verify=ssl_config)
response.raise_for_status()
data = download_data(response)
with open(cache_file, "w") as f:

View File

@@ -0,0 +1,181 @@
"""Test Docker validation functionality in Agent."""
import os
import subprocess
from unittest.mock import Mock, patch, mock_open, MagicMock
import pytest
from crewai import Agent
@patch('crewai.utilities.i18n.I18N')
class TestDockerValidation:
"""Test cases for Docker validation in Agent."""
def test_docker_validation_skipped_with_env_var(self, mock_i18n):
"""Test that Docker validation is skipped when CREWAI_SKIP_DOCKER_VALIDATION=true."""
mock_i18n.return_value = MagicMock()
with patch.dict(os.environ, {"CREWAI_SKIP_DOCKER_VALIDATION": "true"}):
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
allow_code_execution=True,
)
assert agent.allow_code_execution is True
def test_docker_validation_skipped_with_unsafe_mode(self, mock_i18n):
"""Test that Docker validation is skipped when code_execution_mode='unsafe'."""
mock_i18n.return_value = MagicMock()
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
allow_code_execution=True,
code_execution_mode="unsafe",
)
assert agent.code_execution_mode == "unsafe"
@patch("crewai.agent.os.path.exists")
def test_docker_validation_skipped_in_container_dockerenv(self, mock_exists, mock_i18n):
"""Test that Docker validation is skipped when /.dockerenv exists."""
mock_exists.return_value = True
mock_i18n.return_value = MagicMock()
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
allow_code_execution=True,
)
assert agent.allow_code_execution is True
@patch("crewai.agent.os.path.exists")
@patch("builtins.open", new_callable=mock_open, read_data="12:memory:/docker/container123")
def test_docker_validation_skipped_in_container_cgroup(self, mock_file, mock_exists, mock_i18n):
"""Test that Docker validation is skipped when cgroup indicates container."""
mock_exists.return_value = False
mock_i18n.return_value = MagicMock()
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
allow_code_execution=True,
)
assert agent.allow_code_execution is True
@patch("crewai.agent.os.path.exists")
@patch("crewai.agent.os.getpid")
@patch("builtins.open", side_effect=FileNotFoundError)
def test_docker_validation_skipped_in_container_pid1(self, mock_file, mock_getpid, mock_exists, mock_i18n):
"""Test that Docker validation is skipped when running as PID 1."""
mock_exists.return_value = False
mock_getpid.return_value = 1
mock_i18n.return_value = MagicMock()
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
allow_code_execution=True,
)
assert agent.allow_code_execution is True
@patch("crewai.agent.shutil.which")
@patch("crewai.agent.os.path.exists")
@patch("crewai.agent.os.getpid")
@patch("builtins.open", side_effect=FileNotFoundError)
def test_docker_validation_fails_no_docker(self, mock_file, mock_getpid, mock_exists, mock_which, mock_i18n):
"""Test that Docker validation fails when Docker is not installed."""
mock_exists.return_value = False
mock_getpid.return_value = 1000
mock_which.return_value = None
mock_i18n.return_value = MagicMock()
with pytest.raises(RuntimeError, match="Docker is not installed"):
Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
allow_code_execution=True,
)
@patch("crewai.agent.shutil.which")
@patch("crewai.agent.subprocess.run")
@patch("crewai.agent.os.path.exists")
@patch("crewai.agent.os.getpid")
@patch("builtins.open", side_effect=FileNotFoundError)
def test_docker_validation_fails_docker_not_running(self, mock_file, mock_getpid, mock_exists, mock_run, mock_which, mock_i18n):
"""Test that Docker validation fails when Docker daemon is not running."""
mock_exists.return_value = False
mock_getpid.return_value = 1000
mock_which.return_value = "/usr/bin/docker"
mock_run.side_effect = subprocess.CalledProcessError(1, "docker info")
mock_i18n.return_value = MagicMock()
with pytest.raises(RuntimeError, match="Docker is not running"):
Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
allow_code_execution=True,
)
@patch("crewai.agent.shutil.which")
@patch("crewai.agent.subprocess.run")
@patch("crewai.agent.os.path.exists")
@patch("crewai.agent.os.getpid")
@patch("builtins.open", side_effect=FileNotFoundError)
def test_docker_validation_passes_docker_available(self, mock_file, mock_getpid, mock_exists, mock_run, mock_which, mock_i18n):
"""Test that Docker validation passes when Docker is available."""
mock_exists.return_value = False
mock_getpid.return_value = 1000
mock_which.return_value = "/usr/bin/docker"
mock_run.return_value = Mock(returncode=0)
mock_i18n.return_value = MagicMock()
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
allow_code_execution=True,
)
assert agent.allow_code_execution is True
def test_container_detection_methods(self, mock_i18n):
"""Test the container detection logic directly."""
mock_i18n.return_value = MagicMock()
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
)
with patch("crewai.agent.os.path.exists", return_value=True):
assert agent._is_running_in_container() is True
with patch("crewai.agent.os.path.exists", return_value=False), \
patch("builtins.open", mock_open(read_data="docker")):
assert agent._is_running_in_container() is True
with patch("crewai.agent.os.path.exists", return_value=False), \
patch("builtins.open", side_effect=FileNotFoundError), \
patch("crewai.agent.os.getpid", return_value=1):
assert agent._is_running_in_container() is True
def test_reproduce_original_issue(self, mock_i18n):
"""Test that reproduces the original issue from GitHub issue #3028."""
mock_i18n.return_value = MagicMock()
with patch("crewai.agent.os.path.exists", return_value=True):
agent = Agent(
role="Knowledge Pattern Synthesizer",
goal="Synthesize knowledge patterns",
backstory="You're an expert at synthesizing knowledge patterns.",
allow_code_execution=True,
verbose=True,
memory=True,
max_retry_limit=3
)
assert agent.allow_code_execution is True
assert agent.role == "Knowledge Pattern Synthesizer"

View File

@@ -1,133 +0,0 @@
"""Tests for Pydantic version compatibility issues."""
from unittest.mock import patch, MagicMock
from pydantic import BaseModel
from crewai.flow.flow_trackable import FlowTrackable
from crewai.flow import Flow
class TestFlowTrackable(FlowTrackable, BaseModel):
"""Test class that inherits from FlowTrackable for testing."""
name: str = "test"
class MockFlow(Flow):
"""Mock Flow class for testing."""
def __init__(self):
super().__init__()
def test_flow_trackable_instantiation():
"""Test that FlowTrackable can be instantiated without ValidationInfo errors."""
trackable = TestFlowTrackable()
assert trackable.name == "test"
assert trackable.parent_flow is None
def test_flow_trackable_with_parent_flow():
"""Test that FlowTrackable correctly identifies parent flow from call stack."""
mock_flow = MockFlow()
def create_trackable_in_flow():
return TestFlowTrackable()
with patch('inspect.currentframe') as mock_frame:
mock_current_frame = MagicMock()
mock_parent_frame = MagicMock()
mock_flow_frame = MagicMock()
mock_current_frame.f_back = mock_parent_frame
mock_parent_frame.f_back = mock_flow_frame
mock_flow_frame.f_back = None
mock_parent_frame.f_locals = {}
mock_flow_frame.f_locals = {"self": mock_flow}
mock_frame.return_value = mock_current_frame
trackable = create_trackable_in_flow()
assert trackable.parent_flow == mock_flow
def test_flow_trackable_no_parent_flow():
"""Test that FlowTrackable handles case where no parent flow is found."""
with patch('inspect.currentframe') as mock_frame:
mock_current_frame = MagicMock()
mock_parent_frame = MagicMock()
mock_current_frame.f_back = mock_parent_frame
mock_parent_frame.f_back = None
mock_parent_frame.f_locals = {"self": "not_a_flow"}
mock_frame.return_value = mock_current_frame
trackable = TestFlowTrackable()
assert trackable.parent_flow is None
def test_flow_trackable_max_depth_limit():
"""Test that FlowTrackable respects max_depth limit when searching for parent flow."""
with patch('inspect.currentframe') as mock_frame:
mock_frames = []
for i in range(10):
frame = MagicMock()
frame.f_locals = {"self": f"frame_{i}"}
mock_frames.append(frame)
for i in range(len(mock_frames) - 1):
mock_frames[i].f_back = mock_frames[i + 1]
mock_frames[-1].f_back = None
mock_frame.return_value = mock_frames[0]
trackable = TestFlowTrackable()
assert trackable.parent_flow is None
def test_flow_trackable_none_frame():
"""Test that FlowTrackable handles None frame gracefully."""
with patch('inspect.currentframe', return_value=None):
trackable = TestFlowTrackable()
assert trackable.parent_flow is None
def test_pydantic_model_validator_signature():
"""Test that the model validator has the correct signature for Pydantic compatibility."""
import inspect
from crewai.flow.flow_trackable import FlowTrackable
validator_method = FlowTrackable._set_parent_flow
sig = inspect.signature(validator_method)
params = list(sig.parameters.keys())
assert params == ['self'], f"Expected ['self'], got {params}"
assert sig.return_annotation == "FlowTrackable"
def test_crew_instantiation_with_flow_trackable():
"""Test that Crew can be instantiated without ValidationInfo errors (reproduces issue #3011)."""
from crewai import Crew, Agent, Task
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory"
)
task = Task(
description="Test task",
expected_output="Test output",
agent=agent
)
crew = Crew(
agents=[agent],
tasks=[task]
)
assert crew is not None
assert len(crew.agents) == 1
assert len(crew.tasks) == 1