Merge branch 'main' into lg-mcp-event-loop

Greyson LaLonde
2026-03-02 12:05:06 -05:00
committed by GitHub
19 changed files with 412 additions and 348 deletions


@@ -8,8 +8,8 @@ authors = [
 ]
 requires-python = ">=3.10, <3.14"
 dependencies = [
-    "Pillow~=10.4.0",
-    "pypdf~=4.0.0",
+    "Pillow~=12.1.1",
+    "pypdf~=6.7.4",
     "python-magic>=0.4.27",
     "aiocache~=0.12.3",
     "aiofiles~=24.1.0",


@@ -66,7 +66,7 @@ openpyxl = [
 ]
 mem0 = ["mem0ai~=0.1.94"]
 docling = [
-    "docling~=2.63.0",
+    "docling~=2.75.0",
 ]
 qdrant = [
     "qdrant-client[fastembed]~=1.14.3",


@@ -487,8 +487,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             # No tools available, fall back to simple LLM call
             return self._invoke_loop_native_no_tools()

-        openai_tools, available_functions = convert_tools_to_openai_schema(
-            self.original_tools
+        openai_tools, available_functions, self._tool_name_mapping = (
+            convert_tools_to_openai_schema(self.original_tools)
         )

         while True:
@@ -700,9 +700,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         if not parsed_calls:
             return None

-        original_tools_by_name: dict[str, Any] = {}
-        for tool in self.original_tools or []:
-            original_tools_by_name[sanitize_tool_name(tool.name)] = tool
+        original_tools_by_name: dict[str, Any] = dict(self._tool_name_mapping)

         if len(parsed_calls) > 1:
             has_result_as_answer_in_batch = any(
@@ -949,10 +947,16 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         track_delegation_if_needed(func_name, args_dict, self.task)

         structured_tool: CrewStructuredTool | None = None
-        for structured in self.tools or []:
-            if sanitize_tool_name(structured.name) == func_name:
-                structured_tool = structured
-                break
+        if original_tool is not None:
+            for structured in self.tools or []:
+                if getattr(structured, "_original_tool", None) is original_tool:
+                    structured_tool = structured
+                    break
+        if structured_tool is None:
+            for structured in self.tools or []:
+                if sanitize_tool_name(structured.name) == func_name:
+                    structured_tool = structured
+                    break

         hook_blocked = False
         before_hook_context = ToolCallHookContext(
@@ -1312,8 +1316,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         if not self.original_tools:
            return await self._ainvoke_loop_native_no_tools()

-        openai_tools, available_functions = convert_tools_to_openai_schema(
-            self.original_tools
+        openai_tools, available_functions, self._tool_name_mapping = (
+            convert_tools_to_openai_schema(self.original_tools)
         )

         while True:
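The recurring change in this file is that convert_tools_to_openai_schema now returns a third value mapping each sanitized tool name back to its original tool object, so callers no longer rebuild that mapping by looping over self.original_tools. A minimal sketch of the new call shape, assuming crewai's usual custom-tool pattern (EchoTool is an illustrative stand-in, not a tool from this commit):

from crewai.tools import BaseTool
from crewai.utilities.agent_utils import convert_tools_to_openai_schema


class EchoTool(BaseTool):
    name: str = "Echo Tool"
    description: str = "Returns its input unchanged."

    def _run(self, text: str) -> str:
        return text


openai_tools, available_functions, name_mapping = convert_tools_to_openai_schema(
    [EchoTool()]
)
# The third element recovers the original tool object from the sanitized
# name the LLM echoes back in a tool call (expected key: "echo_tool").
assert name_mapping["echo_tool"].name == "Echo Tool"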


@@ -22,14 +22,15 @@ class PlusAPI:
     EPHEMERAL_TRACING_RESOURCE = "/crewai_plus/api/v1/tracing/ephemeral"
     INTEGRATIONS_RESOURCE = "/crewai_plus/api/v1/integrations"

-    def __init__(self, api_key: str) -> None:
+    def __init__(self, api_key: str | None = None) -> None:
         self.api_key = api_key
         self.headers = {
-            "Authorization": f"Bearer {api_key}",
             "Content-Type": "application/json",
             "User-Agent": f"CrewAI-CLI/{get_crewai_version()}",
             "X-Crewai-Version": get_crewai_version(),
         }
+        if api_key:
+            self.headers["Authorization"] = f"Bearer {api_key}"
         settings = Settings()
         if settings.org_uuid:
             self.headers["X-Crewai-Organization-Id"] = settings.org_uuid


@@ -67,7 +67,7 @@ class TraceBatchManager:
                 api_key=get_auth_token(),
             )
         except AuthError:
-            self.plus_api = PlusAPI(api_key="")
+            self.plus_api = PlusAPI()
             self.ephemeral_trace_url = None

     def initialize_batch(


@@ -52,6 +52,8 @@ from crewai.hooks.types import (
     BeforeLLMCallHookCallable,
     BeforeLLMCallHookType,
 )
+from crewai.tools.base_tool import BaseTool
+from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.utilities.agent_utils import (
     convert_tools_to_openai_schema,
     enforce_rpm_limit,
@@ -85,8 +87,6 @@ if TYPE_CHECKING:
     from crewai.crew import Crew
     from crewai.llms.base_llm import BaseLLM
     from crewai.task import Task
-    from crewai.tools.base_tool import BaseTool
-    from crewai.tools.structured_tool import CrewStructuredTool
     from crewai.tools.tool_types import ToolResult
     from crewai.utilities.prompts import StandardPromptResult, SystemPromptResult
@@ -321,7 +321,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
     def _setup_native_tools(self) -> None:
         """Convert tools to OpenAI schema format for native function calling."""
         if self.original_tools:
-            self._openai_tools, self._available_functions = (
+            self._openai_tools, self._available_functions, self._tool_name_mapping = (
                 convert_tools_to_openai_schema(self.original_tools)
             )
@@ -594,21 +594,19 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
     def execute_tool_action(self) -> Literal["tool_completed", "tool_result_is_final"]:
         """Execute the tool action and handle the result."""
+        action = cast(AgentAction, self.state.current_answer)
+
+        fingerprint_context = {}
+        if (
+            self.agent
+            and hasattr(self.agent, "security_config")
+            and hasattr(self.agent.security_config, "fingerprint")
+        ):
+            fingerprint_context = {
+                "agent_fingerprint": str(self.agent.security_config.fingerprint)
+            }
+
         try:
-            action = cast(AgentAction, self.state.current_answer)
-
-            # Extract fingerprint context for tool execution
-            fingerprint_context = {}
-            if (
-                self.agent
-                and hasattr(self.agent, "security_config")
-                and hasattr(self.agent.security_config, "fingerprint")
-            ):
-                fingerprint_context = {
-                    "agent_fingerprint": str(self.agent.security_config.fingerprint)
-                }
-
-            # Execute the tool
             tool_result = execute_tool_and_check_finality(
                 agent_action=action,
                 fingerprint_context=fingerprint_context,
@@ -622,24 +620,19 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 function_calling_llm=self.function_calling_llm,
                 crew=self.crew,
             )
+        except Exception as e:
+            if self.agent and self.agent.verbose:
+                self._printer.print(
+                    content=f"Error in tool execution: {e}", color="red"
+                )
+            if self.task:
+                self.task.increment_tools_errors()

-            # Handle agent action and append observation to messages
-            result = self._handle_agent_action(action, tool_result)
-            self.state.current_answer = result
+            error_observation = f"\nObservation: Error executing tool: {e}"
+            action.text += error_observation
+            action.result = str(e)
+            self._append_message_to_state(action.text)

-            # Invoke step callback if configured
-            self._invoke_step_callback(result)
-
-            # Append result message to conversation state
-            if hasattr(result, "text"):
-                self._append_message_to_state(result.text)
-
-            # Check if tool result became a final answer (result_as_answer flag)
-            if isinstance(result, AgentFinish):
-                self.state.is_finished = True
-                return "tool_result_is_final"
-
-            # Inject post-tool reasoning prompt to enforce analysis
             reasoning_prompt = self._i18n.slice("post_tool_reasoning")
             reasoning_message: LLMMessage = {
                 "role": "user",
@@ -649,12 +642,26 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
             return "tool_completed"
-        except Exception as e:
-            error_text = Text()
-            error_text.append("❌ Error in tool execution: ", style="red bold")
-            error_text.append(str(e), style="red")
-            self._console.print(error_text)
-            raise
+
+        result = self._handle_agent_action(action, tool_result)
+        self.state.current_answer = result
+        self._invoke_step_callback(result)
+        if hasattr(result, "text"):
+            self._append_message_to_state(result.text)
+        if isinstance(result, AgentFinish):
+            self.state.is_finished = True
+            return "tool_result_is_final"
+        reasoning_prompt = self._i18n.slice("post_tool_reasoning")
+        reasoning_message: LLMMessage = {
+            "role": "user",
+            "content": reasoning_prompt,
+        }
+        self.state.messages.append(reasoning_message)
+        return "tool_completed"

     @listen("native_tool_calls")
     def execute_native_tool(
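The rework above does two things: it hoists the action and fingerprint extraction out of the try block so the except handler can reference them, and it converts tool failures into an Observation fed back to the LLM instead of re-raising and aborting the run. A self-contained sketch of that recovery pattern, using stand-in names (ToolAction, run_tool) rather than this commit's own classes:

from dataclasses import dataclass
from typing import Callable


@dataclass
class ToolAction:
    text: str
    result: str = ""


def execute(action: ToolAction, run_tool: Callable[[ToolAction], str]) -> str:
    # `action` is bound before the try block, so the handler may use it
    try:
        action.result = run_tool(action)
    except Exception as e:
        # Surface the failure to the LLM as an observation; keep looping
        action.text += f"\nObservation: Error executing tool: {e}"
        action.result = str(e)
    return "tool_completed"


print(execute(ToolAction("Action: search"), lambda a: 1 / 0))  # tool_completed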
@@ -728,7 +735,20 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
             )
             for future in as_completed(future_to_idx):
                 idx = future_to_idx[future]
-                ordered_results[idx] = future.result()
+                try:
+                    ordered_results[idx] = future.result()
+                except Exception as e:
+                    tool_call = runnable_tool_calls[idx]
+                    info = extract_tool_call_info(tool_call)
+                    call_id = info[0] if info else "unknown"
+                    func_name = info[1] if info else "unknown"
+                    ordered_results[idx] = {
+                        "call_id": call_id,
+                        "func_name": func_name,
+                        "result": f"Error executing tool: {e}",
+                        "from_cache": False,
+                        "original_tool": None,
+                    }

         execution_results = [
             result for result in ordered_results if result is not None
         ]
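Wrapping future.result() in try/except keeps one failed tool call from discarding the whole parallel batch; the failure is recorded at the same index, so result ordering is preserved. The pattern in isolation, runnable as-is with a toy run function:

from concurrent.futures import ThreadPoolExecutor, as_completed


def run(i: int) -> str:
    if i == 1:
        raise RuntimeError("boom")
    return f"ok-{i}"


calls = [0, 1, 2]
ordered_results: list[str | None] = [None] * len(calls)
with ThreadPoolExecutor() as pool:
    future_to_idx = {pool.submit(run, c): i for i, c in enumerate(calls)}
    for future in as_completed(future_to_idx):
        idx = future_to_idx[future]
        try:
            ordered_results[idx] = future.result()
        except Exception as e:
            # Record the failure in place instead of poisoning the batch
            ordered_results[idx] = f"Error executing tool: {e}"

print(ordered_results)  # ['ok-0', 'Error executing tool: boom', 'ok-2']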
@@ -824,11 +844,17 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 continue
             _, func_name, _ = info

-            original_tool = None
-            for tool in self.original_tools or []:
-                if sanitize_tool_name(tool.name) == func_name:
-                    original_tool = tool
-                    break
+            mapping = getattr(self, "_tool_name_mapping", None)
+            original_tool: BaseTool | None = None
+            if mapping and func_name in mapping:
+                mapped = mapping[func_name]
+                if isinstance(mapped, BaseTool):
+                    original_tool = mapped
+            if original_tool is None:
+                for tool in self.original_tools or []:
+                    if sanitize_tool_name(tool.name) == func_name:
+                        original_tool = tool
+                        break

             if not original_tool:
                 continue
@@ -844,7 +870,18 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         """Execute a single native tool call and return metadata/result."""
         info = extract_tool_call_info(tool_call)
         if not info:
-            raise ValueError("Invalid native tool call format")
+            call_id = (
+                getattr(tool_call, "id", None)
+                or (tool_call.get("id") if isinstance(tool_call, dict) else None)
+                or "unknown"
+            )
+            return {
+                "call_id": call_id,
+                "func_name": "unknown",
+                "result": "Error: Invalid native tool call format",
+                "from_cache": False,
+                "original_tool": None,
+            }

         call_id, func_name, func_args = info
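A malformed native tool call now yields a structured error record rather than a raised ValueError, so the failure can be reported back to the model under the right call_id. A condensed, runnable version of that fallback (extract_call_info is a simplified stand-in for extract_tool_call_info):

from typing import Any


def extract_call_info(tool_call: Any) -> tuple[str, str, Any] | None:
    # Stand-in: the real extraction also handles object-style tool calls
    if isinstance(tool_call, dict) and "name" in tool_call:
        return tool_call.get("id", "unknown"), tool_call["name"], tool_call.get("args")
    return None


def run_call(tool_call: Any) -> dict[str, Any]:
    info = extract_call_info(tool_call)
    if not info:
        call_id = (tool_call.get("id") if isinstance(tool_call, dict) else None) or "unknown"
        return {
            "call_id": call_id,
            "func_name": "unknown",
            "result": "Error: Invalid native tool call format",
            "from_cache": False,
            "original_tool": None,
        }
    call_id, func_name, _ = info
    return {"call_id": call_id, "func_name": func_name, "result": "ok",
            "from_cache": False, "original_tool": None}


print(run_call({"id": "call_1"}))  # no "name" key -> structured error record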
@@ -856,12 +893,17 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         # Get agent_key for event tracking
         agent_key = getattr(self.agent, "key", "unknown") if self.agent else "unknown"

         # Find original tool by matching sanitized name (needed for cache_function and result_as_answer)
-        original_tool = None
-        for tool in self.original_tools or []:
-            if sanitize_tool_name(tool.name) == func_name:
-                original_tool = tool
-                break
+        original_tool: BaseTool | None = None
+        mapping = getattr(self, "_tool_name_mapping", None)
+        if mapping and func_name in mapping:
+            mapped = mapping[func_name]
+            if isinstance(mapped, BaseTool):
+                original_tool = mapped
+        if original_tool is None:
+            for tool in self.original_tools or []:
+                if sanitize_tool_name(tool.name) == func_name:
+                    original_tool = tool
+                    break

         # Check if tool has reached max usage count
         max_usage_reached = False
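Tool resolution now prefers the exact _tool_name_mapping entry and only falls back to scanning original_tools by sanitized name. The mapping is what makes deduplicated names like search_2 resolvable at all, since a name scan would never produce the suffixed form. The lookup reduced to a runnable sketch (Tool and the lowercase sanitizer are stand-ins):

from dataclasses import dataclass


@dataclass
class Tool:
    name: str


def resolve(func_name: str, mapping: dict[str, Tool], tools: list[Tool]) -> Tool | None:
    tool = mapping.get(func_name)  # fast path: exact sanitized key
    if tool is None:
        for candidate in tools:  # fallback: scan by sanitized name
            if candidate.name.lower() == func_name:
                return candidate
    return tool


tools = [Tool("Search"), Tool("search")]
mapping = {"search": tools[0], "search_2": tools[1]}
# A name scan could never match "search_2"; only the mapping finds it.
assert resolve("search_2", mapping, tools) is tools[1]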
@@ -904,10 +946,16 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         track_delegation_if_needed(func_name, args_dict, self.task)

         structured_tool: CrewStructuredTool | None = None
-        for structured in self.tools or []:
-            if sanitize_tool_name(structured.name) == func_name:
-                structured_tool = structured
-                break
+        if original_tool is not None:
+            for structured in self.tools or []:
+                if getattr(structured, "_original_tool", None) is original_tool:
+                    structured_tool = structured
+                    break
+        if structured_tool is None:
+            for structured in self.tools or []:
+                if sanitize_tool_name(structured.name) == func_name:
+                    structured_tool = structured
+                    break

         hook_blocked = False
         before_hook_context = ToolCallHookContext(


@@ -173,6 +173,12 @@ class Telemetry:
         self._original_handlers: dict[int, Any] = {}

+        if threading.current_thread() is not threading.main_thread():
+            logger.debug(
+                "Skipping signal handler registration: not running in main thread"
+            )
+            return
+
         self._register_signal_handler(signal.SIGTERM, SigTermEvent, shutdown=True)
         self._register_signal_handler(signal.SIGINT, SigIntEvent, shutdown=True)
         if hasattr(signal, "SIGHUP"):
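signal.signal raises ValueError when called outside the main thread, so initializing Telemetry from a worker thread used to produce a traceback; the new guard skips registration with a debug log instead. The guard pattern in isolation, runnable as-is:

import logging
import signal
import threading

logger = logging.getLogger(__name__)


def install_handlers() -> None:
    if threading.current_thread() is not threading.main_thread():
        logger.debug("Skipping signal handler registration: not running in main thread")
        return
    signal.signal(signal.SIGINT, lambda signum, frame: None)


worker = threading.Thread(target=install_handlers)
worker.start()
worker.join()  # completes without the ValueError signal.signal would raise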


@@ -23,7 +23,7 @@ from pydantic import (
 )
 from typing_extensions import TypeIs

-from crewai.tools.structured_tool import CrewStructuredTool
+from crewai.tools.structured_tool import CrewStructuredTool, build_schema_hint
 from crewai.utilities.printer import Printer
 from crewai.utilities.pydantic_schema_utils import generate_model_description
 from crewai.utilities.string_utils import sanitize_tool_name
@@ -167,8 +167,9 @@ class BaseTool(BaseModel, ABC):
                 validated = self.args_schema.model_validate(kwargs)
                 return validated.model_dump()
             except Exception as e:
+                hint = build_schema_hint(self.args_schema)
                 raise ValueError(
-                    f"Tool '{self.name}' arguments validation failed: {e}"
+                    f"Tool '{self.name}' arguments validation failed: {e}{hint}"
                 ) from e
         return kwargs


@@ -17,6 +17,27 @@ if TYPE_CHECKING:
     from crewai.tools.base_tool import BaseTool


+def build_schema_hint(args_schema: type[BaseModel]) -> str:
+    """Build a human-readable hint from a Pydantic model's JSON schema.
+
+    Args:
+        args_schema: The Pydantic model class to extract schema from.
+
+    Returns:
+        A formatted string with expected arguments and required fields,
+        or empty string if schema extraction fails.
+    """
+    try:
+        schema = args_schema.model_json_schema()
+        return (
+            f"\nExpected arguments: "
+            f"{json.dumps(schema.get('properties', {}))}"
+            f"\nRequired: {json.dumps(schema.get('required', []))}"
+        )
+    except Exception:
+        return ""
+
+
 class ToolUsageLimitExceededError(Exception):
     """Exception raised when a tool has reached its maximum usage limit."""

@@ -208,7 +229,8 @@ class CrewStructuredTool:
             validated_args = self.args_schema.model_validate(raw_args)
             return validated_args.model_dump()
         except Exception as e:
-            raise ValueError(f"Arguments validation failed: {e}") from e
+            hint = build_schema_hint(self.args_schema)
+            raise ValueError(f"Arguments validation failed: {e}{hint}") from e

     async def ainvoke(
         self,
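build_schema_hint turns a validation failure into an actionable message by appending the expected argument schema. What the hint looks like for a small model, runnable with pydantic v2 (SearchArgs is illustrative):

import json

from pydantic import BaseModel


class SearchArgs(BaseModel):
    query: str
    max_results: int = 10


schema = SearchArgs.model_json_schema()
print(
    f"\nExpected arguments: {json.dumps(schema.get('properties', {}))}"
    f"\nRequired: {json.dumps(schema.get('required', []))}"
)
# Expected arguments: {"query": {"title": "Query", "type": "string"}, ...}
# Required: ["query"]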


@@ -139,7 +139,11 @@ def render_text_description_and_args(
 def convert_tools_to_openai_schema(
     tools: Sequence[BaseTool | CrewStructuredTool],
-) -> tuple[list[dict[str, Any]], dict[str, Callable[..., Any]]]:
+) -> tuple[
+    list[dict[str, Any]],
+    dict[str, Callable[..., Any]],
+    dict[str, BaseTool | CrewStructuredTool],
+]:
     """Convert CrewAI tools to OpenAI function calling format.

     This function converts CrewAI BaseTool and CrewStructuredTool objects
@@ -152,16 +156,12 @@
     Returns:
         Tuple containing:
         - List of OpenAI-format tool schema dictionaries
-        - Dict mapping tool names to their callable run() methods
-
-    Example:
-        >>> tools = [CalculatorTool(), SearchTool()]
-        >>> schemas, functions = convert_tools_to_openai_schema(tools)
-        >>> # schemas can be passed to llm.call(tools=schemas)
-        >>> # functions can be passed to llm.call(available_functions=functions)
+        - Dict mapping sanitized tool names to their callable run() methods
+        - Dict mapping sanitized tool names to their original tool objects
     """
     openai_tools: list[dict[str, Any]] = []
     available_functions: dict[str, Callable[..., Any]] = {}
+    tool_name_mapping: dict[str, BaseTool | CrewStructuredTool] = {}

     for tool in tools:
         # Get the JSON schema for tool parameters
@@ -186,6 +186,14 @@
         sanitized_name = sanitize_tool_name(tool.name)
+        if sanitized_name in available_functions:
+            counter = 2
+            candidate = sanitize_tool_name(f"{sanitized_name}_{counter}")
+            while candidate in available_functions:
+                counter += 1
+                candidate = sanitize_tool_name(f"{sanitized_name}_{counter}")
+            sanitized_name = candidate
+
         schema: dict[str, Any] = {
             "type": "function",
             "function": {
@@ -197,8 +205,9 @@
         }
         openai_tools.append(schema)
         available_functions[sanitized_name] = tool.run  # type: ignore[union-attr]
+        tool_name_mapping[sanitized_name] = tool

-    return openai_tools, available_functions
+    return openai_tools, available_functions, tool_name_mapping


 def has_reached_max_iterations(iterations: int, max_iterations: int) -> bool:
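Because two different tools can sanitize to the same name (for example "Search" and "SEARCH"), the schema builder now suffixes duplicates with _2, _3, and so on before registering them. The dedup loop in isolation, with str.lower standing in for sanitize_tool_name:

def unique_name(name: str, taken: set[str], sanitize=str.lower) -> str:
    sanitized = sanitize(name)
    if sanitized in taken:
        counter = 2
        candidate = sanitize(f"{sanitized}_{counter}")
        while candidate in taken:
            counter += 1
            candidate = sanitize(f"{sanitized}_{counter}")
        sanitized = candidate
    taken.add(sanitized)
    return sanitized


taken: set[str] = set()
print(unique_name("Search", taken))  # search
print(unique_name("SEARCH", taken))  # search_2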


@@ -2,6 +2,7 @@
 # https://github.com/un33k/python-slugify
 # MIT License

+import hashlib
 import re
 from typing import Any, Final
 import unicodedata
@@ -40,7 +41,9 @@ def sanitize_tool_name(name: str, max_length: int = _MAX_TOOL_NAME_LENGTH) -> str:
     name = name.strip("_")
     if len(name) > max_length:
-        name = name[:max_length].rstrip("_")
+        name_hash = hashlib.sha256(name.encode()).hexdigest()[:8]
+        suffix = f"_{name_hash}"
+        name = name[: max_length - len(suffix)].rstrip("_") + suffix
     return name
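Plain truncation could collapse two long tool names into the same sanitized string; appending an 8-character SHA-256 digest of the full, untruncated name keeps them distinct. A runnable check of that property (the 64-character limit is an assumption here; the real _MAX_TOOL_NAME_LENGTH may differ):

import hashlib


def truncate_with_hash(name: str, max_length: int = 64) -> str:
    if len(name) > max_length:
        # Digest of the *full* name, so distinct inputs stay distinct
        name_hash = hashlib.sha256(name.encode()).hexdigest()[:8]
        suffix = f"_{name_hash}"
        name = name[: max_length - len(suffix)].rstrip("_") + suffix
    return name


a = truncate_with_hash("long_tool_name_" + "a" * 60)
b = truncate_with_hash("long_tool_name_" + "a" * 61)
assert a != b and len(a) == 64 and len(b) == 64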


@@ -1184,7 +1184,7 @@ class TestNativeToolCallingJsonParseError:
         executor = self._make_executor([tool])
         from crewai.utilities.agent_utils import convert_tools_to_openai_schema

-        _, available_functions = convert_tools_to_openai_schema([tool])
+        _, available_functions, _ = convert_tools_to_openai_schema([tool])

         malformed_json = '{"code": "print("hello")"}'
@@ -1212,7 +1212,7 @@
         executor = self._make_executor([tool])
         from crewai.utilities.agent_utils import convert_tools_to_openai_schema

-        _, available_functions = convert_tools_to_openai_schema([tool])
+        _, available_functions, _ = convert_tools_to_openai_schema([tool])

         valid_json = '{"code": "print(1)"}'
@@ -1239,7 +1239,7 @@
         executor = self._make_executor([tool])
         from crewai.utilities.agent_utils import convert_tools_to_openai_schema

-        _, available_functions = convert_tools_to_openai_schema([tool])
+        _, available_functions, _ = convert_tools_to_openai_schema([tool])

         result = executor._execute_single_native_tool_call(
             call_id="call_789",
@@ -1265,7 +1265,7 @@
         executor = self._make_executor([tool])
         from crewai.utilities.agent_utils import convert_tools_to_openai_schema

-        _, available_functions = convert_tools_to_openai_schema([tool])
+        _, available_functions, _ = convert_tools_to_openai_schema([tool])

         result = executor._execute_single_native_tool_call(
             call_id="call_schema",


@@ -121,3 +121,41 @@ def test_telemetry_singleton_pattern():
         thread.join()

     assert all(instance is telemetry1 for instance in instances)
+
+
+def test_no_signal_handler_traceback_in_non_main_thread():
+    """Signal handler registration should be silently skipped in non-main threads.
+
+    Regression test for https://github.com/crewAIInc/crewAI/issues/4289
+    """
+    errors: list[Exception] = []
+    mock_holder: dict = {}
+
+    def init_in_thread():
+        try:
+            Telemetry._instance = None
+            with (
+                patch.dict(
+                    os.environ,
+                    {"CREWAI_DISABLE_TELEMETRY": "false", "OTEL_SDK_DISABLED": "false"},
+                ),
+                patch("crewai.telemetry.telemetry.TracerProvider"),
+                patch("signal.signal") as mock_signal,
+                patch("crewai.telemetry.telemetry.logger") as mock_logger,
+            ):
+                Telemetry()
+                mock_holder["signal"] = mock_signal
+                mock_holder["logger"] = mock_logger
+        except Exception as exc:
+            errors.append(exc)
+
+    thread = threading.Thread(target=init_in_thread)
+    thread.start()
+    thread.join()
+
+    assert not errors, f"Unexpected error: {errors}"
+    assert mock_holder, "Thread did not execute"
+    mock_holder["signal"].assert_not_called()
+    mock_holder["logger"].debug.assert_any_call(
+        "Skipping signal handler registration: not running in main thread"
+    )


@@ -80,7 +80,7 @@ class TestConvertToolsToOpenaiSchema:
     def test_converts_single_tool(self) -> None:
         """Test converting a single tool to OpenAI schema."""
        tools = [CalculatorTool()]
-        schemas, functions = convert_tools_to_openai_schema(tools)
+        schemas, functions, _ = convert_tools_to_openai_schema(tools)

         assert len(schemas) == 1
         assert len(functions) == 1
@@ -95,7 +95,7 @@
     def test_converts_multiple_tools(self) -> None:
         """Test converting multiple tools to OpenAI schema."""
         tools = [CalculatorTool(), SearchTool()]
-        schemas, functions = convert_tools_to_openai_schema(tools)
+        schemas, functions, _ = convert_tools_to_openai_schema(tools)

         assert len(schemas) == 2
         assert len(functions) == 2
@@ -113,7 +113,7 @@
     def test_functions_dict_contains_callables(self) -> None:
         """Test that the functions dict maps names to callable run methods."""
         tools = [CalculatorTool(), SearchTool()]
-        schemas, functions = convert_tools_to_openai_schema(tools)
+        schemas, functions, _ = convert_tools_to_openai_schema(tools)

         assert "calculator" in functions
         assert "web_search" in functions
@@ -123,14 +123,14 @@
     def test_function_can_be_called(self) -> None:
         """Test that the returned function can be called."""
         tools = [CalculatorTool()]
-        schemas, functions = convert_tools_to_openai_schema(tools)
+        schemas, functions, _ = convert_tools_to_openai_schema(tools)

         result = functions["calculator"](expression="2 + 2")
         assert result == "4"

     def test_empty_tools_list(self) -> None:
         """Test with an empty tools list."""
-        schemas, functions = convert_tools_to_openai_schema([])
+        schemas, functions, _ = convert_tools_to_openai_schema([])

         assert schemas == []
         assert functions == {}
@@ -138,7 +138,7 @@
     def test_schema_has_required_fields(self) -> None:
         """Test that the schema includes required fields information."""
         tools = [SearchTool()]
-        schemas, functions = convert_tools_to_openai_schema(tools)
+        schemas, functions, _ = convert_tools_to_openai_schema(tools)

         schema = schemas[0]
         params = schema["function"]["parameters"]
@@ -158,7 +158,7 @@
             return "done"

         tools = [MinimalTool()]
-        schemas, functions = convert_tools_to_openai_schema(tools)
+        schemas, functions, _ = convert_tools_to_openai_schema(tools)

         assert len(schemas) == 1
         schema = schemas[0]
@@ -169,7 +169,7 @@
     def test_schema_structure_matches_openai_format(self) -> None:
         """Test that the schema structure matches OpenAI's expected format."""
         tools = [CalculatorTool()]
-        schemas, functions = convert_tools_to_openai_schema(tools)
+        schemas, functions, _ = convert_tools_to_openai_schema(tools)

         schema = schemas[0]
@@ -194,7 +194,7 @@
     def test_removes_redundant_schema_fields(self) -> None:
         """Test that redundant title and description are removed from parameters."""
         tools = [CalculatorTool()]
-        schemas, functions = convert_tools_to_openai_schema(tools)
+        schemas, functions, _ = convert_tools_to_openai_schema(tools)

         params = schemas[0]["function"]["parameters"]
         # Title should be removed as it's redundant with function name
@@ -203,7 +203,7 @@
     def test_preserves_field_descriptions(self) -> None:
         """Test that field descriptions are preserved in the schema."""
         tools = [SearchTool()]
-        schemas, functions = convert_tools_to_openai_schema(tools)
+        schemas, functions, _ = convert_tools_to_openai_schema(tools)

         params = schemas[0]["function"]["parameters"]
         query_prop = params["properties"]["query"]
@@ -215,7 +215,7 @@
     def test_preserves_default_values(self) -> None:
         """Test that default values are preserved in the schema."""
         tools = [SearchTool()]
-        schemas, functions = convert_tools_to_openai_schema(tools)
+        schemas, functions, _ = convert_tools_to_openai_schema(tools)

         params = schemas[0]["function"]["parameters"]
         max_results_prop = params["properties"]["max_results"]
@@ -265,7 +265,7 @@ class TestOptionalFieldsPreserveNull:
         """Optional[str] fields should include null in the schema so the LLM
         can send null instead of being forced to guess a value."""
         tools = [MCPStyleTool()]
-        schemas, _ = convert_tools_to_openai_schema(tools)
+        schemas, _, _ = convert_tools_to_openai_schema(tools)

         params = schemas[0]["function"]["parameters"]
         page_id_prop = params["properties"]["page_id"]
@@ -278,7 +278,7 @@
     def test_optional_literal_allows_null(self) -> None:
         """Optional[Literal[...]] fields should include null."""
         tools = [MCPStyleTool()]
-        schemas, _ = convert_tools_to_openai_schema(tools)
+        schemas, _, _ = convert_tools_to_openai_schema(tools)

         params = schemas[0]["function"]["parameters"]
         filter_prop = params["properties"]["filter_type"]
@@ -290,7 +290,7 @@
     def test_required_field_stays_non_null(self) -> None:
         """Required fields without Optional should NOT have null."""
         tools = [MCPStyleTool()]
-        schemas, _ = convert_tools_to_openai_schema(tools)
+        schemas, _, _ = convert_tools_to_openai_schema(tools)

         params = schemas[0]["function"]["parameters"]
         query_prop = params["properties"]["query"]
@@ -301,7 +301,7 @@
     def test_all_fields_in_required_for_strict_mode(self) -> None:
         """All fields (including optional) must be in required for strict mode."""
         tools = [MCPStyleTool()]
-        schemas, _ = convert_tools_to_openai_schema(tools)
+        schemas, _, _ = convert_tools_to_openai_schema(tools)

         params = schemas[0]["function"]["parameters"]
         assert "query" in params["required"]