mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-10 00:28:31 +00:00
Fix output_json parameter with custom_openai backends
Co-Authored-By: Joe Moura <joao@crewai.com>
This commit is contained in:
@@ -72,7 +72,21 @@ class Converter(OutputConverter):
|
|||||||
"""Convert text to json."""
|
"""Convert text to json."""
|
||||||
try:
|
try:
|
||||||
if self.llm.supports_function_calling():
|
if self.llm.supports_function_calling():
|
||||||
|
try:
|
||||||
return self._create_instructor().to_json()
|
return self._create_instructor().to_json()
|
||||||
|
except Exception as e:
|
||||||
|
# Check if this is the specific Instructor error for multiple tool calls
|
||||||
|
if "Instructor does not support multiple tool calls, use List[Model] instead" in str(e):
|
||||||
|
# Fall back to non-function calling approach for custom OpenAI backends
|
||||||
|
return json.dumps(
|
||||||
|
self.llm.call(
|
||||||
|
[
|
||||||
|
{"role": "system", "content": self.instructions},
|
||||||
|
{"role": "user", "content": self.text},
|
||||||
|
]
|
||||||
|
)
|
||||||
|
)
|
||||||
|
raise e
|
||||||
else:
|
else:
|
||||||
return json.dumps(
|
return json.dumps(
|
||||||
self.llm.call(
|
self.llm.call(
|
||||||
|
|||||||
tests/utilities/test_custom_openai_json.py | 74 ++ (new file)
@@ -0,0 +1,74 @@
|
|||||||
|
import json
|
||||||
|
import pytest
|
||||||
|
from unittest.mock import Mock, patch
|
||||||
|
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
from crewai.utilities.converter import Converter
|
||||||
|
from crewai.llm import LLM
|
||||||
|
|
||||||
|
|
||||||
|
class SimpleModel(BaseModel):
    """Minimal pydantic schema used as the target model in the conversion tests."""

    name: str
    age: int
|
||||||
|
|
||||||
|
|
||||||
|
class TestCustomOpenAIJson:
    """Tests for Converter.to_json with custom OpenAI-compatible backends.

    Covers both the fallback path (Instructor raises its multiple-tool-calls
    error, so the converter calls the LLM directly) and the normal path
    (Instructor succeeds and the fallback is never used).
    """

    def test_custom_openai_json_conversion_with_instructor_error(self):
        """Fallback path: Instructor's multiple-tool-calls error triggers a raw LLM call."""
        # LLM that reports function-calling support; its raw call returns a JSON string.
        llm = Mock(spec=LLM)
        llm.supports_function_calling.return_value = True
        llm.call.return_value = '{"name": "John", "age": 30}'

        # Instructor raises the exact error message the fallback checks for.
        mock_instructor = Mock()
        mock_instructor.to_json.side_effect = Exception(
            "Instructor does not support multiple tool calls, use List[Model] instead"
        )

        converter = Converter(
            llm=llm,
            text="Convert this to JSON",
            model=SimpleModel,
            instructions="Convert to JSON",
        )

        # Route instructor creation to the erroring mock so the fallback fires.
        with patch.object(converter, "_create_instructor", return_value=mock_instructor):
            result = converter.to_json()

        # The fallback must have gone through the raw LLM call exactly once.
        llm.call.assert_called_once()
        # The fallback wraps the LLM's string reply in json.dumps, so the result
        # is a JSON-encoded string whose payload is itself JSON: decode twice
        # and pin the final payload (the previous `x == str or x == dict`
        # assertion masked this double encoding).
        inner = json.loads(result)
        assert json.loads(inner) == {"name": "John", "age": 30}

    def test_custom_openai_json_conversion_without_error(self):
        """Normal path: Instructor succeeds, so the raw-LLM fallback is never used."""
        llm = Mock(spec=LLM)
        llm.supports_function_calling.return_value = True

        # Instructor returns JSON directly without raising.
        mock_instructor = Mock()
        mock_instructor.to_json.return_value = '{"name": "John", "age": 30}'

        converter = Converter(
            llm=llm,
            text="Convert this to JSON",
            model=SimpleModel,
            instructions="Convert to JSON",
        )

        with patch.object(converter, "_create_instructor", return_value=mock_instructor):
            result = converter.to_json()

        # No fallback: the raw LLM call is never made and the Instructor
        # output decodes straight to the expected payload.
        llm.call.assert_not_called()
        assert json.loads(result) == {"name": "John", "age": 30}
|
||||||
Reference in New Issue
Block a user