From e0458132f5c36e7a724b53609f40c0923718e10c Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Wed, 5 Mar 2025 00:41:32 +0000
Subject: [PATCH] Fix output_json parameter with custom_openai backends

Co-Authored-By: Joe Moura
---
 src/crewai/utilities/converter.py          | 16 ++++-
 tests/utilities/test_custom_openai_json.py | 74 ++++++++++++++++++++++
 2 files changed, 89 insertions(+), 1 deletion(-)
 create mode 100644 tests/utilities/test_custom_openai_json.py

diff --git a/src/crewai/utilities/converter.py b/src/crewai/utilities/converter.py
index 991185f4a..8aea317c1 100644
--- a/src/crewai/utilities/converter.py
+++ b/src/crewai/utilities/converter.py
@@ -72,7 +72,21 @@ class Converter(OutputConverter):
         """Convert text to json."""
         try:
             if self.llm.supports_function_calling():
-                return self._create_instructor().to_json()
+                try:
+                    return self._create_instructor().to_json()
+                except Exception as e:
+                    # Check if this is the specific Instructor error for multiple tool calls
+                    if "Instructor does not support multiple tool calls, use List[Model] instead" in str(e):
+                        # Fall back to non-function calling approach for custom OpenAI backends
+                        return json.dumps(
+                            self.llm.call(
+                                [
+                                    {"role": "system", "content": self.instructions},
+                                    {"role": "user", "content": self.text},
+                                ]
+                            )
+                        )
+                    raise e
             else:
                 return json.dumps(
                     self.llm.call(
diff --git a/tests/utilities/test_custom_openai_json.py b/tests/utilities/test_custom_openai_json.py
new file mode 100644
index 000000000..c712558cc
--- /dev/null
+++ b/tests/utilities/test_custom_openai_json.py
@@ -0,0 +1,74 @@
+import json
+import pytest
+from unittest.mock import Mock, patch
+
+from pydantic import BaseModel
+
+from crewai.utilities.converter import Converter
+from crewai.llm import LLM
+
+
+class SimpleModel(BaseModel):
+    name: str
+    age: int
+
+
+class TestCustomOpenAIJson:
+    def test_custom_openai_json_conversion_with_instructor_error(self):
+        """Test that JSON conversion works with custom OpenAI backends when Instructor raises an error."""
+        # Mock LLM that supports function calling
+        llm = Mock(spec=LLM)
+        llm.supports_function_calling.return_value = True
+        llm.call.return_value = '{"name": "John", "age": 30}'
+
+        # Mock Instructor that raises the specific error
+        mock_instructor = Mock()
+        mock_instructor.to_json.side_effect = Exception(
+            "Instructor does not support multiple tool calls, use List[Model] instead"
+        )
+
+        # Create converter with mocked dependencies
+        converter = Converter(
+            llm=llm,
+            text="Convert this to JSON",
+            model=SimpleModel,
+            instructions="Convert to JSON",
+        )
+
+        # Mock the _create_instructor method to return our mocked instructor
+        with patch.object(converter, '_create_instructor', return_value=mock_instructor):
+            # Call to_json method
+            result = converter.to_json()
+
+            # Verify that the fallback mechanism was used
+            llm.call.assert_called_once()
+            # The result is a JSON string, so we need to parse it
+            parsed_result = json.loads(result)
+            assert parsed_result == '{"name": "John", "age": 30}' or parsed_result == {"name": "John", "age": 30}
+
+    def test_custom_openai_json_conversion_without_error(self):
+        """Test that JSON conversion works normally when Instructor doesn't raise an error."""
+        # Mock LLM that supports function calling
+        llm = Mock(spec=LLM)
+        llm.supports_function_calling.return_value = True
+
+        # Mock Instructor that returns JSON without error
+        mock_instructor = Mock()
+        mock_instructor.to_json.return_value = '{"name": "John", "age": 30}'
+
+        # Create converter with mocked dependencies
+        converter = Converter(
+            llm=llm,
+            text="Convert this to JSON",
+            model=SimpleModel,
+            instructions="Convert to JSON",
+        )
+
+        # Mock the _create_instructor method to return our mocked instructor
+        with patch.object(converter, '_create_instructor', return_value=mock_instructor):
+            # Call to_json method
+            result = converter.to_json()
+
+            # Verify that the normal path was used (no fallback)
+            llm.call.assert_not_called()
+            assert json.loads(result) == {"name": "John", "age": 30}
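
Usage context, not part of the patch itself: the scenario this fix targets is a Task that sets output_json while the crew's LLM points at an OpenAI-compatible custom backend, where the Instructor path can raise the "multiple tool calls" error during structured-output conversion. The sketch below is a minimal, hedged illustration of that setup; the model name, base_url, api_key, agent/task wording, and example text are placeholders rather than values taken from the patch.

    # Minimal sketch of the scenario this patch addresses. All concrete values
    # below (model name, base_url, api_key, prompt text) are placeholders.
    from pydantic import BaseModel

    from crewai import Agent, Crew, Task, LLM


    class Person(BaseModel):
        name: str
        age: int


    # OpenAI-compatible custom backend (placeholder endpoint and credentials).
    custom_llm = LLM(
        model="openai/my-local-model",
        base_url="http://localhost:8000/v1",
        api_key="placeholder-key",
    )

    extractor = Agent(
        role="Extractor",
        goal="Turn free text into structured JSON",
        backstory="Specializes in pulling fields out of short text snippets.",
        llm=custom_llm,
    )

    task = Task(
        description="Extract the person's name and age from: 'Ada is 36 years old.'",
        expected_output="A JSON object with the keys name and age",
        agent=extractor,
        output_json=Person,  # this path previously surfaced the Instructor
                             # "multiple tool calls" error on custom backends
    )

    crew = Crew(agents=[extractor], tasks=[task])
    result = crew.kickoff()

    # With the fallback in Converter.to_json(), the structured output is produced
    # via a plain llm.call() instead of propagating the Instructor error.
    print(result)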