diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index 6b0ce8ac0..ef904fffd 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -305,8 +305,6 @@ class LLM:
         Args:
             messages: Input messages for the LLM
             tools: Optional list of tool schemas
-            callbacks: Optional list of callback functions
-            available_functions: Optional dict of available functions

         Returns:
             Dict[str, Any]: Parameters for the completion call
@@ -317,16 +315,53 @@ class LLM:
         formatted_messages = self._format_messages_for_provider(messages)

         # --- 2) If using Gemini, ensure additionalProperties is not in tool schemas
-        if tools and "gemini" in self.model.lower():
-            for i, tool in enumerate(tools):
-                if (
-                    isinstance(tool, dict)
-                    and "function" in tool
-                    and "parameters" in tool["function"]
-                ):
-                    params = tool["function"]["parameters"]
-                    if "additionalProperties" in params:
-                        del params["additionalProperties"]
+        self._clean_gemini_tool_parameters(tools)
+
+        # --- 3) Prepare the parameters for the completion call
+        params = {
+            "model": self.model,
+            "messages": formatted_messages,
+            "timeout": self.timeout,
+            "temperature": self.temperature,
+            "top_p": self.top_p,
+            "n": self.n,
+            "stop": self.stop,
+            "max_tokens": self.max_tokens or self.max_completion_tokens,
+            "presence_penalty": self.presence_penalty,
+            "frequency_penalty": self.frequency_penalty,
+            "logit_bias": self.logit_bias,
+            "response_format": self.response_format,
+            "seed": self.seed,
+            "logprobs": self.logprobs,
+            "top_logprobs": self.top_logprobs,
+            "api_base": self.api_base,
+            "base_url": self.base_url,
+            "api_version": self.api_version,
+            "api_key": self.api_key,
+            "stream": self.stream,
+            "tools": tools,
+            "reasoning_effort": self.reasoning_effort,
+            **self.additional_params,
+        }
+
+        # Remove None values from params
+        return {k: v for k, v in params.items() if v is not None}
+
+    def _clean_gemini_tool_parameters(
+        self, tools: Optional[List[dict]]
+    ) -> None:
+        """Remove additionalProperties from tool parameters for Gemini compatibility.
+
+        Args:
+            tools: List of tool dictionaries that may contain function schemas
+        """
+        if not tools or "gemini" not in self.model.lower():
+            return
+
+        for tool in tools:
+            if isinstance(tool, dict) and "function" in tool:
+                params = tool["function"].get("parameters", {})
+                params.pop("additionalProperties", None)

         # --- 3) Prepare the parameters for the completion call
         params = {
diff --git a/src/crewai/tools/structured_tool.py b/src/crewai/tools/structured_tool.py
index 200a4ee4a..2697a3212 100644
--- a/src/crewai/tools/structured_tool.py
+++ b/src/crewai/tools/structured_tool.py
@@ -2,7 +2,7 @@ from __future__ import annotations

 import inspect
 import textwrap
-from typing import Any, Callable, Optional, Union, get_type_hints
+from typing import Any, Callable, Dict, List, Optional, Type, Union, get_type_hints

 from pydantic import BaseModel, Field, create_model

@@ -240,25 +240,36 @@ class CrewStructuredTool:
         """Get the tool's input arguments schema."""
         return self.args_schema.model_json_schema()["properties"]

-    def to_openai_function(self) -> dict:
+    def to_openai_function(self) -> Dict[str, Any]:
         """Convert the tool to an OpenAI function format.

         Returns:
-            dict: A dictionary in the OpenAI function format.
+            Dict[str, Any]: A dictionary in the OpenAI function format.
+
+        Example:
+            ```python
+            tool = CrewStructuredTool(...)
+            function_dict = tool.to_openai_function()
+            # Use with OpenAI or compatible APIs
+            ```
+
+        Raises:
+            ValueError: If the schema conversion fails
         """
-        schema = self.args_schema.model_json_schema()
-        # Remove additionalProperties field to prevent Gemini API errors
-        if "additionalProperties" in schema:
-            del schema["additionalProperties"]
-
-        return {
-            "type": "function",
-            "function": {
-                "name": self.name,
-                "description": self.description,
-                "parameters": schema
+        try:
+            schema = self.args_schema.model_json_schema()
+            schema.pop("additionalProperties", None)
+
+            return {
+                "type": "function",
+                "function": {
+                    "name": self.name,
+                    "description": self.description,
+                    "parameters": schema
+                }
             }
-        }
+        except Exception as e:
+            raise ValueError(f"Failed to convert tool to OpenAI function format: {str(e)}")

     def __repr__(self) -> str:
         return (
diff --git a/tests/tools/test_structured_tool.py b/tests/tools/test_structured_tool.py
index de4a5ee64..47b5b91dc 100644
--- a/tests/tools/test_structured_tool.py
+++ b/tests/tools/test_structured_tool.py
@@ -171,3 +171,28 @@ class TestInternalCrewStructuredTool:
         assert function_dict["function"]["description"] == "A test tool"
         assert "properties" in function_dict["function"]["parameters"]
         assert "test_field" in function_dict["function"]["parameters"]["properties"]
+
+    def test_to_openai_function_edge_cases(self):
+        """Test edge cases for to_openai_function conversion."""
+        class EmptySchema(BaseModel):
+            pass
+
+        def empty_func() -> None:
+            pass
+
+        tool = CrewStructuredTool(
+            name="empty_tool",
+            description="A tool with empty schema",
+            args_schema=EmptySchema,
+            func=empty_func
+        )
+
+        function_dict = tool.to_openai_function()
+        assert function_dict["type"] == "function"
+        assert function_dict["function"]["name"] == "empty_tool"
+
+        # Check that parameters contains the expected fields
+        params = function_dict["function"]["parameters"]
+        assert params["title"] == "EmptySchema"
+        assert params["type"] == "object"
+        assert "properties" in params  # Empty schema still has a properties field
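For reviewers, a minimal sketch of the behaviour this change targets, written only against the pieces visible in the diff (`CrewStructuredTool(name=..., description=..., args_schema=..., func=...)` and `to_openai_function()`). The `SearchArgs` model and `search` function are hypothetical placeholders, and pydantic v2 is assumed:

```python
from pydantic import BaseModel, ConfigDict, Field

from crewai.tools.structured_tool import CrewStructuredTool


class SearchArgs(BaseModel):
    # Hypothetical schema; extra="forbid" makes pydantic emit
    # `"additionalProperties": false`, the field Gemini rejects.
    model_config = ConfigDict(extra="forbid")

    query: str = Field(..., description="Search query to run")


def search(query: str) -> str:
    # Placeholder implementation for the sketch.
    return f"results for {query}"


tool = CrewStructuredTool(
    name="search",
    description="Search the web",
    args_schema=SearchArgs,
    func=search,
)

function_dict = tool.to_openai_function()

# With this patch the converter strips the field, so the same tool
# definition is usable with Gemini as well as OpenAI-compatible endpoints.
assert "additionalProperties" not in function_dict["function"]["parameters"]
```

Stripping the field in `to_openai_function()` keeps the Gemini-specific schema handling in one place, with the `_clean_gemini_tool_parameters` pass in `LLM` acting as a second guard for tools built elsewhere.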