chore: update dependencies and versioning for CrewAI 0.193.0 (#3542)

* chore: update dependencies and versioning for CrewAI

- Bump `crewai-tools` dependency version from `0.71.0` to `0.73.0` in `pyproject.toml`.
- Update CrewAI version from `0.186.1` to `0.193.0` in `__init__.py`.
- Adjust dependency versions in CLI templates for crew, flow, and tool to reflect the new CrewAI version.

This update ensures compatibility with the latest features and improvements in CrewAI.
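
A quick way to confirm the bump after reinstalling is to check the version the package reports at runtime; a minimal sketch, assuming CrewAI 0.193.0 is installed in the active environment:

```python
# Sanity check after upgrading: the installed package should report the version
# pinned in this release. `crewai.__version__` is set in the package's __init__.py,
# as shown in the diff below.
import crewai

assert crewai.__version__ == "0.193.0", f"unexpected CrewAI version: {crewai.__version__}"
print(f"CrewAI {crewai.__version__} is installed")
```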

* remove embedchain mock

* fix: remove last embedchain mocks
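
  These changes drop the embedchain setup mock that previously guarded `crewai_tools` imports in the tests (it was only there to avoid race conditions in parallel CI, per the removed comments). A sketch of the before/after import pattern, with tool names taken from the diff below:

```python
# Old pattern (removed): imports guarded by an embedchain mock to avoid race
# conditions in parallel CI runs.
# with patch("embedchain.client.Client.setup"):
#     from crewai_tools import EnterpriseActionTool, FileReadTool, SerperDevTool

# New pattern: plain imports, no embedchain setup mock needed.
from crewai_tools import (
    EnterpriseActionTool,
    FileReadTool,
    SerperDevTool,
)
```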

* fix: remove langchain_openai from tests
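
  The dropped tests configured a custom LLM via `langchain_openai.ChatOpenAI`; the equivalent setup goes through CrewAI's own `LLM` class, which the surrounding tests (e.g. `test_custom_llm`) already exercise. A minimal sketch, assuming the `Agent` and `LLM` constructor arguments used elsewhere in this test file:

```python
# Illustrative sketch (not part of the diff): configure an agent's LLM directly
# with crewai.LLM instead of langchain_openai.ChatOpenAI. Model name and
# temperature mirror the removed test.
from crewai import Agent, LLM

agent = Agent(
    role="test role",
    goal="test goal",
    backstory="test backstory",
    llm=LLM(model="gpt-4", temperature=0.7),
)

assert agent.llm.model == "gpt-4"
assert agent.llm.temperature == 0.7
```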

---------

Co-authored-by: Greyson LaLonde <greyson.r.lalonde@gmail.com>
Author: Lorenze Jay
Date: 2025-09-19 12:01:55 -07:00
Committed by: GitHub
Parent: 9491fe8334
Commit: c062826779
9 changed files with 2988 additions and 3446 deletions

View File

@@ -48,7 +48,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = ["crewai-tools~=0.71.0"]
tools = ["crewai-tools~=0.73.0"]
embeddings = [
"tiktoken~=0.8.0"
]

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
__version__ = "0.186.1"
__version__ = "0.193.0"
_telemetry_submitted = False

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.186.1,<1.0.0"
"crewai[tools]>=0.193.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.186.1,<1.0.0",
"crewai[tools]>=0.193.0,<1.0.0",
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.186.1"
"crewai[tools]>=0.193.0"
]
[tool.crewai]

View File

@@ -137,35 +137,6 @@ def test_custom_llm():
    assert agent.llm.model == "gpt-4"
-def test_custom_llm_with_langchain():
-    from langchain_openai import ChatOpenAI
-    agent = Agent(
-        role="test role",
-        goal="test goal",
-        backstory="test backstory",
-        llm=ChatOpenAI(temperature=0, model="gpt-4"),
-    )
-    assert agent.llm.model == "gpt-4"
-def test_custom_llm_temperature_preservation():
-    from langchain_openai import ChatOpenAI
-    langchain_llm = ChatOpenAI(temperature=0.7, model="gpt-4")
-    agent = Agent(
-        role="temperature test role",
-        goal="temperature test goal",
-        backstory="temperature test backstory",
-        llm=langchain_llm,
-    )
-    assert isinstance(agent.llm, LLM)
-    assert agent.llm.model == "gpt-4"
-    assert agent.llm.temperature == 0.7
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execution():
    agent = Agent(
@@ -2361,13 +2332,11 @@ def mock_get_auth_token():
@patch("crewai.cli.plus_api.PlusAPI.get_agent")
def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
-    # Mock embedchain initialization to prevent race conditions in parallel CI execution
-    with patch("embedchain.client.Client.setup"):
-        from crewai_tools import (
-            EnterpriseActionTool,
-            FileReadTool,
-            SerperDevTool,
-        )
+    from crewai_tools import (
+        EnterpriseActionTool,
+        FileReadTool,
+        SerperDevTool,
+    )
    mock_get_response = MagicMock()
    mock_get_response.status_code = 200
@@ -2423,9 +2392,7 @@ def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
@patch("crewai.cli.plus_api.PlusAPI.get_agent")
def test_agent_from_repository_override_attributes(mock_get_agent, mock_get_auth_token):
-    # Mock embedchain initialization to prevent race conditions in parallel CI execution
-    with patch("embedchain.client.Client.setup"):
-        from crewai_tools import SerperDevTool
+    from crewai_tools import SerperDevTool
    mock_get_response = MagicMock()
    mock_get_response.status_code = 200

View File

@@ -3818,10 +3818,7 @@ def test_task_tools_preserve_code_execution_tools():
    """
    Test that task tools don't override code execution tools when allow_code_execution=True
    """
-    # Mock embedchain initialization to prevent race conditions in parallel CI execution
-    with patch("embedchain.client.Client.setup"):
-        from crewai_tools import CodeInterpreterTool
+    from crewai_tools import CodeInterpreterTool
    from pydantic import BaseModel, Field
    from crewai.tools import BaseTool

View File

@@ -112,9 +112,9 @@ def test_agent_memoization():
    first_call_result = crew.simple_agent()
    second_call_result = crew.simple_agent()
-    assert (
-        first_call_result is second_call_result
-    ), "Agent memoization is not working as expected"
+    assert first_call_result is second_call_result, (
+        "Agent memoization is not working as expected"
+    )
def test_task_memoization():
@@ -122,9 +122,9 @@ def test_task_memoization():
    first_call_result = crew.simple_task()
    second_call_result = crew.simple_task()
-    assert (
-        first_call_result is second_call_result
-    ), "Task memoization is not working as expected"
+    assert first_call_result is second_call_result, (
+        "Task memoization is not working as expected"
+    )
def test_crew_memoization():
@@ -132,35 +132,35 @@ def test_crew_memoization():
    first_call_result = crew.crew()
    second_call_result = crew.crew()
-    assert (
-        first_call_result is second_call_result
-    ), "Crew references should point to the same object"
+    assert first_call_result is second_call_result, (
+        "Crew references should point to the same object"
+    )
def test_task_name():
    simple_task = SimpleCrew().simple_task()
-    assert (
-        simple_task.name == "simple_task"
-    ), "Task name is not inferred from function name as expected"
+    assert simple_task.name == "simple_task", (
+        "Task name is not inferred from function name as expected"
+    )
    custom_named_task = SimpleCrew().custom_named_task()
-    assert (
-        custom_named_task.name == "Custom"
-    ), "Custom task name is not being set as expected"
+    assert custom_named_task.name == "Custom", (
+        "Custom task name is not being set as expected"
+    )
def test_agent_function_calling_llm():
    crew = InternalCrew()
    llm = crew.local_llm()
    obj_llm_agent = crew.researcher()
-    assert (
-        obj_llm_agent.function_calling_llm is llm
-    ), "agent's function_calling_llm is incorrect"
+    assert obj_llm_agent.function_calling_llm is llm, (
+        "agent's function_calling_llm is incorrect"
+    )
    str_llm_agent = crew.reporting_analyst()
-    assert (
-        str_llm_agent.function_calling_llm.model == "online_llm"
-    ), "agent's function_calling_llm is incorrect"
+    assert str_llm_agent.function_calling_llm.model == "online_llm", (
+        "agent's function_calling_llm is incorrect"
+    )
def test_task_guardrail():
@@ -186,9 +186,9 @@ def test_after_kickoff_modification():
    # Assuming the crew execution returns a dict
    result = crew.crew().kickoff({"topic": "LLMs"})
-    assert (
-        "post processed" in result.raw
-    ), "After kickoff function did not modify outputs"
+    assert "post processed" in result.raw, (
+        "After kickoff function did not modify outputs"
+    )
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -274,10 +274,8 @@ def another_simple_tool():
def test_internal_crew_with_mcp():
-    # Mock embedchain initialization to prevent race conditions in parallel CI execution
-    with patch("embedchain.client.Client.setup"):
-        from crewai_tools import MCPServerAdapter
-        from crewai_tools.adapters.mcp_adapter import ToolCollection
+    from crewai_tools import MCPServerAdapter
+    from crewai_tools.adapters.mcp_adapter import ToolCollection
    mock = Mock(spec=MCPServerAdapter)
    mock.tools = ToolCollection([simple_tool, another_simple_tool])
@@ -287,6 +285,5 @@ def test_internal_crew_with_mcp():
    assert crew.researcher().tools == [simple_tool]
    adapter_mock.assert_called_once_with(
-        {"host": "localhost", "port": 8000},
-        connect_timeout=120
+        {"host": "localhost", "port": 8000}, connect_timeout=120
    )

uv.lock (generated): 6317 lines changed; file diff suppressed because it is too large.