mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-08 23:58:34 +00:00

Compare commits: 4 commits (devin/1766...devin/1766)

| SHA1 |
|---|
| db4cb93770 |
| be70a04153 |
| 0c359f4df8 |
| fe288dbe73 |
@@ -12,7 +12,7 @@ dependencies = [
     "pytube~=15.0.0",
     "requests~=2.32.5",
     "docker~=7.1.0",
-    "crewai==1.7.1",
+    "crewai==1.7.2",
     "lancedb~=0.5.4",
     "tiktoken~=0.8.0",
     "beautifulsoup4~=4.13.4",
@@ -291,4 +291,4 @@ __all__ = [
     "ZapierActionTools",
 ]
 
-__version__ = "1.7.1"
+__version__ = "1.7.2"
@@ -1,5 +1,5 @@
 """Crewai Enterprise Tools."""
 
+import os
 import json
 import re
 from typing import Any, Optional, Union, cast, get_origin
@@ -432,7 +432,11 @@ class CrewAIPlatformActionTool(BaseTool):
         payload = cleaned_kwargs
 
         response = requests.post(
-            url=api_url, headers=headers, json=payload, timeout=60
+            url=api_url,
+            headers=headers,
+            json=payload,
+            timeout=60,
+            verify=os.environ.get("CREWAI_FACTORY", "false").lower() != "true",
         )
 
         data = response.json()
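Note: the new verify flag is driven entirely by the CREWAI_FACTORY environment variable. A minimal standalone sketch of the same gate (the helper name below is illustrative, not part of the patch):

import os

def ssl_verify_enabled() -> bool:
    # Mirrors the expression added above: certificate verification stays on
    # unless CREWAI_FACTORY is explicitly set to "true" (case-insensitive).
    return os.environ.get("CREWAI_FACTORY", "false").lower() != "true"

# unset   -> True  (verify certificates)
# "false" -> True
# "FALSE" -> True
# "true"  -> False (skip verification)
# "TRUE"  -> False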
@@ -1,5 +1,5 @@
 from typing import Any
 
+import os
 from crewai.tools import BaseTool
 import requests
@@ -37,6 +37,7 @@ class CrewaiPlatformToolBuilder:
                 headers=headers,
                 timeout=30,
                 params={"apps": ",".join(self._apps)},
+                verify=os.environ.get("CREWAI_FACTORY", "false").lower() != "true",
             )
             response.raise_for_status()
         except Exception:
@@ -1,4 +1,6 @@
 from typing import Union, get_args, get_origin
+from unittest.mock import patch, Mock
+import os
 
 from crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool import (
     CrewAIPlatformActionTool,
@@ -249,3 +251,109 @@ class TestSchemaProcessing:
         result_type = tool._process_schema_type(test_schema, "TestFieldAllOfMixed")
 
         assert result_type is str
+
+
+class TestCrewAIPlatformActionToolVerify:
+    """Test suite for SSL verification behavior based on CREWAI_FACTORY environment variable"""
+
+    def setup_method(self):
+        self.action_schema = {
+            "function": {
+                "name": "test_action",
+                "parameters": {
+                    "properties": {
+                        "test_param": {
+                            "type": "string",
+                            "description": "Test parameter"
+                        }
+                    },
+                    "required": []
+                }
+            }
+        }
+
+    def create_test_tool(self):
+        return CrewAIPlatformActionTool(
+            description="Test action tool",
+            action_name="test_action",
+            action_schema=self.action_schema
+        )
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}, clear=True)
+    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
+    def test_run_with_ssl_verification_default(self, mock_post):
+        """Test that _run uses SSL verification by default when CREWAI_FACTORY is not set"""
+        mock_response = Mock()
+        mock_response.ok = True
+        mock_response.json.return_value = {"result": "success"}
+        mock_post.return_value = mock_response
+
+        tool = self.create_test_tool()
+        tool._run(test_param="test_value")
+
+        mock_post.assert_called_once()
+        call_args = mock_post.call_args
+        assert call_args.kwargs["verify"] is True
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "false"}, clear=True)
+    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
+    def test_run_with_ssl_verification_factory_false(self, mock_post):
+        """Test that _run uses SSL verification when CREWAI_FACTORY is 'false'"""
+        mock_response = Mock()
+        mock_response.ok = True
+        mock_response.json.return_value = {"result": "success"}
+        mock_post.return_value = mock_response
+
+        tool = self.create_test_tool()
+        tool._run(test_param="test_value")
+
+        mock_post.assert_called_once()
+        call_args = mock_post.call_args
+        assert call_args.kwargs["verify"] is True
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "FALSE"}, clear=True)
+    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
+    def test_run_with_ssl_verification_factory_false_uppercase(self, mock_post):
+        """Test that _run uses SSL verification when CREWAI_FACTORY is 'FALSE' (case-insensitive)"""
+        mock_response = Mock()
+        mock_response.ok = True
+        mock_response.json.return_value = {"result": "success"}
+        mock_post.return_value = mock_response
+
+        tool = self.create_test_tool()
+        tool._run(test_param="test_value")
+
+        mock_post.assert_called_once()
+        call_args = mock_post.call_args
+        assert call_args.kwargs["verify"] is True
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "true"}, clear=True)
+    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
+    def test_run_without_ssl_verification_factory_true(self, mock_post):
+        """Test that _run disables SSL verification when CREWAI_FACTORY is 'true'"""
+        mock_response = Mock()
+        mock_response.ok = True
+        mock_response.json.return_value = {"result": "success"}
+        mock_post.return_value = mock_response
+
+        tool = self.create_test_tool()
+        tool._run(test_param="test_value")
+
+        mock_post.assert_called_once()
+        call_args = mock_post.call_args
+        assert call_args.kwargs["verify"] is False
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "TRUE"}, clear=True)
+    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
+    def test_run_without_ssl_verification_factory_true_uppercase(self, mock_post):
+        """Test that _run disables SSL verification when CREWAI_FACTORY is 'TRUE' (case-insensitive)"""
+        mock_response = Mock()
+        mock_response.ok = True
+        mock_response.json.return_value = {"result": "success"}
+        mock_post.return_value = mock_response
+
+        tool = self.create_test_tool()
+        tool._run(test_param="test_value")
+
+        mock_post.assert_called_once()
+        call_args = mock_post.call_args
+        assert call_args.kwargs["verify"] is False
@@ -258,3 +258,98 @@ class TestCrewaiPlatformToolBuilder(unittest.TestCase):
         assert "simple_string" in description_text
         assert "nested_object" in description_text
         assert "array_prop" in description_text
+
+
+class TestCrewaiPlatformToolBuilderVerify(unittest.TestCase):
+    """Test suite for SSL verification behavior in CrewaiPlatformToolBuilder"""
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}, clear=True)
+    @patch(
+        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
+    )
+    def test_fetch_actions_with_ssl_verification_default(self, mock_get):
+        """Test that _fetch_actions uses SSL verification by default when CREWAI_FACTORY is not set"""
+        mock_response = Mock()
+        mock_response.raise_for_status.return_value = None
+        mock_response.json.return_value = {"actions": {}}
+        mock_get.return_value = mock_response
+
+        builder = CrewaiPlatformToolBuilder(apps=["github"])
+        builder._fetch_actions()
+
+        mock_get.assert_called_once()
+        call_args = mock_get.call_args
+        assert call_args.kwargs["verify"] is True
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "false"}, clear=True)
+    @patch(
+        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
+    )
+    def test_fetch_actions_with_ssl_verification_factory_false(self, mock_get):
+        """Test that _fetch_actions uses SSL verification when CREWAI_FACTORY is 'false'"""
+        mock_response = Mock()
+        mock_response.raise_for_status.return_value = None
+        mock_response.json.return_value = {"actions": {}}
+        mock_get.return_value = mock_response
+
+        builder = CrewaiPlatformToolBuilder(apps=["github"])
+        builder._fetch_actions()
+
+        mock_get.assert_called_once()
+        call_args = mock_get.call_args
+        assert call_args.kwargs["verify"] is True
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "FALSE"}, clear=True)
+    @patch(
+        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
+    )
+    def test_fetch_actions_with_ssl_verification_factory_false_uppercase(self, mock_get):
+        """Test that _fetch_actions uses SSL verification when CREWAI_FACTORY is 'FALSE' (case-insensitive)"""
+        mock_response = Mock()
+        mock_response.raise_for_status.return_value = None
+        mock_response.json.return_value = {"actions": {}}
+        mock_get.return_value = mock_response
+
+        builder = CrewaiPlatformToolBuilder(apps=["github"])
+        builder._fetch_actions()
+
+        mock_get.assert_called_once()
+        call_args = mock_get.call_args
+        assert call_args.kwargs["verify"] is True
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "true"}, clear=True)
+    @patch(
+        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
+    )
+    def test_fetch_actions_without_ssl_verification_factory_true(self, mock_get):
+        """Test that _fetch_actions disables SSL verification when CREWAI_FACTORY is 'true'"""
+        mock_response = Mock()
+        mock_response.raise_for_status.return_value = None
+        mock_response.json.return_value = {"actions": {}}
+        mock_get.return_value = mock_response
+
+        builder = CrewaiPlatformToolBuilder(apps=["github"])
+        builder._fetch_actions()
+
+        mock_get.assert_called_once()
+        call_args = mock_get.call_args
+        assert call_args.kwargs["verify"] is False
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "TRUE"}, clear=True)
+    @patch(
+        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
+    )
+    def test_fetch_actions_without_ssl_verification_factory_true_uppercase(self, mock_get):
+        """Test that _fetch_actions disables SSL verification when CREWAI_FACTORY is 'TRUE' (case-insensitive)"""
+        mock_response = Mock()
+        mock_response.raise_for_status.return_value = None
+        mock_response.json.return_value = {"actions": {}}
+        mock_get.return_value = mock_response
+
+        builder = CrewaiPlatformToolBuilder(apps=["github"])
+        builder._fetch_actions()
+
+        mock_get.assert_called_once()
+        call_args = mock_get.call_args
+        assert call_args.kwargs["verify"] is False
@@ -49,7 +49,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
 
 [project.optional-dependencies]
 tools = [
-    "crewai-tools==1.7.1",
+    "crewai-tools==1.7.2",
 ]
 embeddings = [
     "tiktoken~=0.8.0"
@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
 
 _suppress_pydantic_deprecation_warnings()
 
-__version__ = "1.7.1"
+__version__ = "1.7.2"
 _telemetry_submitted = False
 
 
@@ -1576,7 +1576,17 @@ class Agent(BaseAgent):
         Returns:
             LiteAgentOutput: The result of the agent execution.
         """
+        if self.apps:
+            platform_tools = self.get_platform_tools(self.apps)
+            if platform_tools and self.tools is not None:
+                self.tools.extend(platform_tools)
+        if self.mcps:
+            mcps = self.get_mcp_tools(self.mcps)
+            if mcps and self.tools is not None:
+                self.tools.extend(mcps)
+
         lite_agent = LiteAgent(
             id=self.id,
             role=self.role,
             goal=self.goal,
             backstory=self.backstory,
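Note: both injections extend self.tools in place and are skipped when self.tools is None. A compressed sketch of just that guard, standalone and with plain lists standing in for the real tool objects:

tools = []                    # stands in for self.tools
platform_tools = ["tool_a"]   # stands in for the result of get_platform_tools(...)

if platform_tools and tools is not None:
    tools.extend(platform_tools)
# With tools = None the extend branch is skipped and the fetched tools are dropped.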
@@ -251,6 +251,10 @@ def prepare_tools(
 ) -> list[BaseTool]:
     """Prepare tools for task execution and create agent executor.
 
+    This function prepares tools for task execution, including injecting MCP tools
+    if the agent has MCP server configurations. MCP tools are merged with existing
+    tools, with MCP tools replacing any existing tools with the same name.
+
     Args:
         agent: The agent instance.
         tools: Optional list of tools.
@@ -259,7 +263,25 @@ def prepare_tools(
     Returns:
         The list of tools to use.
     """
-    final_tools = tools or agent.tools or []
+    # Create a copy to avoid mutating the original list
+    final_tools = list(tools or agent.tools or [])
+
+    # Inject MCP tools if agent has mcps configured
+    if hasattr(agent, "mcps") and agent.mcps:
+        try:
+            mcp_tools = agent.get_mcp_tools(agent.mcps)
+            if mcp_tools:
+                # Merge tools: MCP tools replace existing tools with the same name
+                mcp_tool_names = {tool.name for tool in mcp_tools}
+                final_tools = [
+                    tool for tool in final_tools if tool.name not in mcp_tool_names
+                ]
+                final_tools.extend(mcp_tools)
+        except Exception as e:
+            agent._logger.log(
+                "warning", f"Failed to get MCP tools, continuing without them: {e}"
+            )
+
     agent.create_agent_executor(tools=final_tools, task=task)
     return final_tools
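Note: the merge step gives MCP tools precedence over same-named entries in the existing list. A small sketch of just that merge, assuming tool objects only need a name attribute (the helper and Tool type below are illustrative, not part of the patch):

from collections import namedtuple

Tool = namedtuple("Tool", "name")

def merge_tools(existing, mcp_tools):
    # MCP tools replace existing tools with the same name; everything else is kept.
    mcp_names = {tool.name for tool in mcp_tools}
    merged = [tool for tool in existing if tool.name not in mcp_names]
    merged.extend(mcp_tools)
    return merged

merged = merge_tools([Tool("search"), Tool("sql")], [Tool("search")])
# -> [Tool(name='sql'), Tool(name='search')]; the MCP "search" wins on the name collision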
@@ -149,7 +149,9 @@ class AuthenticationCommand:
                 return
 
             if token_data["error"] not in ("authorization_pending", "slow_down"):
-                raise requests.HTTPError(token_data["error_description"])
+                raise requests.HTTPError(
+                    token_data.get("error_description") or token_data.get("error")
+                )
 
             time.sleep(device_code_data["interval"])
             attempts += 1
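Note: the rewritten raise falls back to the raw error code when the token response carries no description. A hedged illustration with made-up response data:

token_data = {"error": "access_denied"}  # no "error_description" key in this example
message = token_data.get("error_description") or token_data.get("error")
# message == "access_denied"; the old token_data["error_description"] lookup would raise KeyError here.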
@@ -1,6 +1,6 @@
 from typing import Any
 from urllib.parse import urljoin
 
+import os
 import requests
 
 from crewai.cli.config import Settings
@@ -33,9 +33,7 @@ class PlusAPI:
         if settings.org_uuid:
             self.headers["X-Crewai-Organization-Id"] = settings.org_uuid
 
-        self.base_url = (
-            str(settings.enterprise_base_url) or DEFAULT_CREWAI_ENTERPRISE_URL
-        )
+        self.base_url = os.getenv("CREWAI_PLUS_URL") or str(settings.enterprise_base_url) or DEFAULT_CREWAI_ENTERPRISE_URL
 
     def _make_request(
         self, method: str, endpoint: str, **kwargs: Any
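Note: base_url now resolves in the order environment variable, then settings, then the library default. A minimal sketch of that precedence (the URLs and the default value below are placeholders, not the actual constants):

import os

DEFAULT_CREWAI_ENTERPRISE_URL = "https://app.crewai.com"  # placeholder for the real constant
settings_url = "https://custom-url.com/api"               # stands in for Settings().enterprise_base_url

base_url = os.getenv("CREWAI_PLUS_URL") or str(settings_url) or DEFAULT_CREWAI_ENTERPRISE_URL
# With CREWAI_PLUS_URL set it wins; otherwise the configured settings URL; otherwise the default.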
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]==1.7.1"
+    "crewai[tools]==1.7.2"
 ]
 
 [project.scripts]
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]==1.7.1"
+    "crewai[tools]==1.7.2"
 ]
 
 [project.scripts]
@@ -24,7 +24,6 @@ from pydantic import (
     BaseModel,
     Field,
     PrivateAttr,
-    ValidationError,
     field_validator,
     model_validator,
 )
@@ -44,7 +43,7 @@ from crewai.tasks.task_output import TaskOutput
 from crewai.tools.base_tool import BaseTool
 from crewai.utilities.config import process_config
 from crewai.utilities.constants import NOT_SPECIFIED, _NotSpecified
-from crewai.utilities.converter import Converter, ConverterError, convert_to_model
+from crewai.utilities.converter import Converter, convert_to_model
 from crewai.utilities.guardrail import (
     process_guardrail,
 )
@@ -1045,13 +1044,7 @@ Follow these guidelines:
             tools=tools,
         )
 
-        try:
-            pydantic_output, json_output = self._export_output(result)
-        except (ValidationError, ConverterError):
-            # If export fails due to invalid output format, set outputs to None
-            # and let the next iteration's guardrail check handle it
-            pydantic_output, json_output = None, None
-
+        pydantic_output, json_output = self._export_output(result)
         task_output = TaskOutput(
             name=self.name or self.description,
             description=self.description,
@@ -1147,13 +1140,7 @@ Follow these guidelines:
             tools=tools,
         )
 
-        try:
-            pydantic_output, json_output = self._export_output(result)
-        except (ValidationError, ConverterError):
-            # If export fails due to invalid output format, set outputs to None
-            # and let the next iteration's guardrail check handle it
-            pydantic_output, json_output = None, None
-
+        pydantic_output, json_output = self._export_output(result)
 task_output = TaskOutput(
             name=self.name or self.description,
             description=self.description,
@@ -1,7 +1,7 @@
+import os
 import unittest
 from unittest.mock import ANY, MagicMock, patch
 
 from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
 from crewai.cli.plus_api import PlusAPI
 
 
@@ -35,7 +35,7 @@ class TestPlusAPI(unittest.TestCase):
     ):
         mock_make_request.assert_called_once_with(
             method,
-            f"{DEFAULT_CREWAI_ENTERPRISE_URL}{endpoint}",
+            f"{os.getenv('CREWAI_PLUS_URL')}{endpoint}",
            headers={
                 "Authorization": ANY,
                 "Content-Type": ANY,
@@ -53,7 +53,7 @@ class TestPlusAPI(unittest.TestCase):
     ):
         mock_settings = MagicMock()
         mock_settings.org_uuid = self.org_uuid
-        mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
+        mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL')
         mock_settings_class.return_value = mock_settings
         # re-initialize Client
         self.api = PlusAPI(self.api_key)
@@ -84,7 +84,7 @@ class TestPlusAPI(unittest.TestCase):
     def test_get_agent_with_org_uuid(self, mock_make_request, mock_settings_class):
         mock_settings = MagicMock()
         mock_settings.org_uuid = self.org_uuid
-        mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
+        mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL')
         mock_settings_class.return_value = mock_settings
         # re-initialize Client
         self.api = PlusAPI(self.api_key)
@@ -115,7 +115,7 @@ class TestPlusAPI(unittest.TestCase):
     def test_get_tool_with_org_uuid(self, mock_make_request, mock_settings_class):
         mock_settings = MagicMock()
         mock_settings.org_uuid = self.org_uuid
-        mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
+        mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL')
         mock_settings_class.return_value = mock_settings
         # re-initialize Client
         self.api = PlusAPI(self.api_key)
@@ -163,7 +163,7 @@ class TestPlusAPI(unittest.TestCase):
     def test_publish_tool_with_org_uuid(self, mock_make_request, mock_settings_class):
         mock_settings = MagicMock()
         mock_settings.org_uuid = self.org_uuid
-        mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
+        mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL')
         mock_settings_class.return_value = mock_settings
         # re-initialize Client
         self.api = PlusAPI(self.api_key)
@@ -320,6 +320,7 @@ class TestPlusAPI(unittest.TestCase):
         )
 
     @patch("crewai.cli.plus_api.Settings")
+    @patch.dict(os.environ, {"CREWAI_PLUS_URL": ""})
     def test_custom_base_url(self, mock_settings_class):
         mock_settings = MagicMock()
         mock_settings.enterprise_base_url = "https://custom-url.com/api"
@@ -329,3 +330,11 @@ class TestPlusAPI(unittest.TestCase):
             custom_api.base_url,
             "https://custom-url.com/api",
         )
+
+    @patch.dict(os.environ, {"CREWAI_PLUS_URL": "https://custom-url-from-env.com"})
+    def test_custom_base_url_from_env(self):
+        custom_api = PlusAPI("test_key")
+        self.assertEqual(
+            custom_api.base_url,
+            "https://custom-url-from-env.com",
+        )
@@ -3,7 +3,9 @@ from unittest.mock import AsyncMock, MagicMock, patch
 
 import pytest
 from crewai.agent.core import Agent
+from crewai.agent.utils import prepare_tools
 from crewai.mcp.config import MCPServerHTTP, MCPServerSSE, MCPServerStdio
+from crewai.task import Task
 from crewai.tools.base_tool import BaseTool
 
 
@@ -198,3 +200,200 @@ async def test_mcp_tool_execution_in_async_context(mock_tool_definitions):
 
         assert result == "test result"
         mock_client.call_tool.assert_called()
+
+
+def test_prepare_tools_injects_mcp_tools(mock_tool_definitions):
+    """Test that prepare_tools injects MCP tools when agent has mcps configured.
+
+    This is the core fix for issue #4133 - LLM doesn't see MCP tools when
+    using standalone agent execution (without Crew).
+    """
+    http_config = MCPServerHTTP(url="https://api.example.com/mcp")
+
+    with patch("crewai.agent.core.MCPClient") as mock_client_class:
+        mock_client = AsyncMock()
+        mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
+        mock_client.connected = False
+        mock_client.connect = AsyncMock()
+        mock_client.disconnect = AsyncMock()
+        mock_client_class.return_value = mock_client
+
+        agent = Agent(
+            role="Test Agent",
+            goal="Test goal",
+            backstory="Test backstory",
+            mcps=[http_config],
+        )
+
+        task = Task(
+            description="Test task",
+            expected_output="Test output",
+            agent=agent,
+        )
+
+        final_tools = prepare_tools(agent, None, task)
+
+        assert len(final_tools) == 2
+        assert all(isinstance(tool, BaseTool) for tool in final_tools)
+        tool_names = [tool.name for tool in final_tools]
+        assert any("test_tool_1" in name for name in tool_names)
+        assert any("test_tool_2" in name for name in tool_names)
+
+
+def test_prepare_tools_merges_mcp_tools_with_existing_tools(mock_tool_definitions):
+    """Test that prepare_tools merges MCP tools with existing agent tools.
+
+    MCP tools are added alongside existing tools. Note that MCP tools have
+    prefixed names (based on server URL), so they won't conflict with
+    existing tools that have the same base name.
+    """
+    http_config = MCPServerHTTP(url="https://api.example.com/mcp")
+
+    class ExistingTool(BaseTool):
+        name: str = "existing_tool"
+        description: str = "An existing tool"
+
+        def _run(self, **kwargs):
+            return "existing result"
+
+    class AnotherTool(BaseTool):
+        name: str = "another_tool"
+        description: str = "Another existing tool"
+
+        def _run(self, **kwargs):
+            return "another result"
+
+    with patch("crewai.agent.core.MCPClient") as mock_client_class:
+        mock_client = AsyncMock()
+        mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
+        mock_client.connected = False
+        mock_client.connect = AsyncMock()
+        mock_client.disconnect = AsyncMock()
+        mock_client_class.return_value = mock_client
+
+        agent = Agent(
+            role="Test Agent",
+            goal="Test goal",
+            backstory="Test backstory",
+            tools=[ExistingTool(), AnotherTool()],
+            mcps=[http_config],
+        )
+
+        task = Task(
+            description="Test task",
+            expected_output="Test output",
+            agent=agent,
+        )
+
+        final_tools = prepare_tools(agent, None, task)
+
+        assert len(final_tools) == 4
+        tool_names = [tool.name for tool in final_tools]
+        assert "existing_tool" in tool_names
+        assert "another_tool" in tool_names
+        assert any("test_tool_1" in name for name in tool_names)
+        assert any("test_tool_2" in name for name in tool_names)
+
+
+def test_prepare_tools_does_not_mutate_original_tools_list(mock_tool_definitions):
+    """Test that prepare_tools does not mutate the original tools list."""
+    http_config = MCPServerHTTP(url="https://api.example.com/mcp")
+
+    class ExistingTool(BaseTool):
+        name: str = "existing_tool"
+        description: str = "An existing tool"
+
+        def _run(self, **kwargs):
+            return "existing result"
+
+    original_tools = [ExistingTool()]
+    original_tools_copy = list(original_tools)
+
+    with patch("crewai.agent.core.MCPClient") as mock_client_class:
+        mock_client = AsyncMock()
+        mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
+        mock_client.connected = False
+        mock_client.connect = AsyncMock()
+        mock_client.disconnect = AsyncMock()
+        mock_client_class.return_value = mock_client
+
+        agent = Agent(
+            role="Test Agent",
+            goal="Test goal",
+            backstory="Test backstory",
+            tools=original_tools,
+            mcps=[http_config],
+        )
+
+        task = Task(
+            description="Test task",
+            expected_output="Test output",
+            agent=agent,
+        )
+
+        final_tools = prepare_tools(agent, original_tools, task)
+
+        assert len(original_tools) == len(original_tools_copy)
+        assert len(final_tools) == 3
+
+
+def test_prepare_tools_handles_mcp_failure_gracefully(mock_tool_definitions):
+    """Test that prepare_tools continues without MCP tools if get_mcp_tools fails."""
+    http_config = MCPServerHTTP(url="https://api.example.com/mcp")
+
+    class ExistingTool(BaseTool):
+        name: str = "existing_tool"
+        description: str = "An existing tool"
+
+        def _run(self, **kwargs):
+            return "existing result"
+
+    with patch("crewai.agent.core.MCPClient") as mock_client_class:
+        mock_client_class.side_effect = Exception("Connection failed")
+
+        agent = Agent(
+            role="Test Agent",
+            goal="Test goal",
+            backstory="Test backstory",
+            tools=[ExistingTool()],
+            mcps=[http_config],
+        )
+
+        task = Task(
+            description="Test task",
+            expected_output="Test output",
+            agent=agent,
+        )
+
+        final_tools = prepare_tools(agent, None, task)
+
+        assert len(final_tools) == 1
+        assert final_tools[0].name == "existing_tool"
+
+
+def test_prepare_tools_without_mcps():
+    """Test that prepare_tools works normally when agent has no mcps configured."""
+    class ExistingTool(BaseTool):
+        name: str = "existing_tool"
+        description: str = "An existing tool"
+
+        def _run(self, **kwargs):
+            return "existing result"
+
+    agent = Agent(
+        role="Test Agent",
+        goal="Test goal",
+        backstory="Test backstory",
+        tools=[ExistingTool()],
+    )
+
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=agent,
+    )
+
+    final_tools = prepare_tools(agent, None, task)
+
+    assert len(final_tools) == 1
+    assert final_tools[0].name == "existing_tool"
@@ -752,155 +752,3 @@ def test_per_guardrail_independent_retry_tracking():
     assert call_counts["g3"] == 1
 
     assert "G3(1)" in result.raw
-
-
-def test_guardrail_retries_with_invalid_pydantic_output():
-    """Test that guardrail retries work when agent produces invalid pydantic output.
-
-    This test covers the bug reported in issue #4126 where guardrail_max_retries
-    logic was broken due to unhandled ValidationError in _invoke_guardrail_function.
-    When the agent produces invalid JSON that fails Pydantic validation, the system
-    should continue retrying instead of crashing.
-    """
-    from pydantic import BaseModel, Field
-
-    class OutputModel(BaseModel):
-        title: str = Field(description="The title")
-        content: str = Field(description="The content")
-
-    call_count = 0
-
-    def mock_execute_task(*args, **kwargs):
-        nonlocal call_count
-        call_count += 1
-        if call_count == 1:
-            return "invalid json that will fail pydantic validation"
-        elif call_count == 2:
-            return "still invalid { broken json"
-        else:
-            return '{"title": "Valid Title", "content": "Valid Content"}'
-
-    def always_fail_guardrail(result: TaskOutput) -> tuple[bool, str]:
-        if call_count < 3:
-            return (False, "Output not valid yet")
-        return (True, result.raw)
-
-    agent = Mock()
-    agent.role = "test_agent"
-    agent.execute_task = mock_execute_task
-    agent.crew = None
-    agent.last_messages = []
-
-    task = create_smart_task(
-        description="Test pydantic validation during guardrail retries",
-        expected_output="Valid structured output",
-        guardrail=always_fail_guardrail,
-        output_pydantic=OutputModel,
-        guardrail_max_retries=3,
-    )
-
-    result = task.execute_sync(agent=agent)
-
-    assert call_count == 3
-    assert result.pydantic is not None
-    assert result.pydantic.title == "Valid Title"
-    assert result.pydantic.content == "Valid Content"
-
-
-def test_guardrail_max_retries_exhausted_with_invalid_pydantic():
-    """Test that max retries are properly exhausted even with pydantic validation errors.
-
-    This ensures that when the agent consistently produces invalid output that fails
-    pydantic validation, the retry loop continues until max_retries is exhausted,
-    rather than crashing on the first validation error.
-    """
-    from pydantic import BaseModel, Field
-
-    class StrictModel(BaseModel):
-        required_field: str = Field(description="A required field")
-
-    call_count = 0
-
-    def mock_execute_task(*args, **kwargs):
-        nonlocal call_count
-        call_count += 1
-        return "this is not valid json and will always fail"
-
-    def always_fail_guardrail(result: TaskOutput) -> tuple[bool, str]:
-        return (False, "Output is not valid")
-
-    agent = Mock()
-    agent.role = "test_agent"
-    agent.execute_task = mock_execute_task
-    agent.crew = None
-    agent.last_messages = []
-
-    task = create_smart_task(
-        description="Test max retries with invalid pydantic",
-        expected_output="Structured output",
-        guardrail=always_fail_guardrail,
-        output_pydantic=StrictModel,
-        guardrail_max_retries=2,
-    )
-
-    with pytest.raises(Exception) as exc_info:
-        task.execute_sync(agent=agent)
-
-    assert "Task failed guardrail validation after 2 retries" in str(exc_info.value)
-    assert call_count == 3
-
-
-def test_guardrail_with_pydantic_validation_error_continues_retry():
-    """Test that pydantic ValidationError during retry doesn't crash the loop.
-
-    This is a regression test for issue #4126. The bug was that when _export_output
-    raised a ValidationError during the guardrail retry loop, it would crash instead
-    of continuing to the next retry attempt.
-    """
-    from pydantic import BaseModel, Field
-
-    class TestModel(BaseModel):
-        value: int = Field(description="An integer value")
-
-    execution_results = [
-        "not json",
-        '{"value": "not_an_int"}',
-        '{"value": 42}',
-    ]
-    call_index = 0
-
-    def mock_execute_task(*args, **kwargs):
-        nonlocal call_index
-        result = execution_results[call_index]
-        call_index += 1
-        return result
-
-    guardrail_calls = 0
-
-    def counting_guardrail(result: TaskOutput) -> tuple[bool, str]:
-        nonlocal guardrail_calls
-        guardrail_calls += 1
-        if guardrail_calls < 3:
-            return (False, f"Retry attempt {guardrail_calls}")
-        return (True, result.raw)
-
-    agent = Mock()
-    agent.role = "test_agent"
-    agent.execute_task = mock_execute_task
-    agent.crew = None
-    agent.last_messages = []
-
-    task = create_smart_task(
-        description="Test ValidationError handling in retry loop",
-        expected_output="Integer output",
-        guardrail=counting_guardrail,
-        output_pydantic=TestModel,
-        guardrail_max_retries=3,
-    )
-
-    result = task.execute_sync(agent=agent)
-
-    assert call_index == 3
-    assert guardrail_calls == 3
-    assert result.pydantic is not None
-    assert result.pydantic.value == 42
@@ -1,3 +1,3 @@
 """CrewAI development tools."""
 
-__version__ = "1.7.1"
+__version__ = "1.7.2"