From c216b310e3f1eab7b24e3761b13d950eac63261d Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Thu, 17 Jul 2025 01:56:56 +0000
Subject: [PATCH] Fix response_format validation for OpenAI-compatible models

- Allow OpenAI provider models to use response_format without validation errors
- Add comprehensive tests for OpenAI-compatible models like qwen-plus
- Ensure non-OpenAI providers still use original validation logic
- Fixes issue #3174 where structured output failed for third-party OpenAI-compatible models

Co-Authored-By: João
---
 src/crewai/llm.py | 20 ++++++++++++--------
 tests/llm_test.py | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+), 8 deletions(-)

diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index d6f40a09a..e15734fb0 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -1090,14 +1090,18 @@ class LLM(BaseLLM):
         - If no slash is present, "openai" is assumed.
         """
         provider = self._get_custom_llm_provider()
-        if self.response_format is not None and not supports_response_schema(
-            model=self.model,
-            custom_llm_provider=provider,
-        ):
-            raise ValueError(
-                f"The model {self.model} does not support response_format for provider '{provider}'. "
-                "Please remove response_format or use a supported model."
-            )
+        if self.response_format is not None:
+            if provider == "openai":
+                return
+
+            if not supports_response_schema(
+                model=self.model,
+                custom_llm_provider=provider,
+            ):
+                raise ValueError(
+                    f"The model {self.model} does not support response_format for provider '{provider}'. "
+                    "Please remove response_format or use a supported model."
+                )
 
     def supports_function_calling(self) -> bool:
         try:
diff --git a/tests/llm_test.py b/tests/llm_test.py
index 1065876af..ae9b05a54 100644
--- a/tests/llm_test.py
+++ b/tests/llm_test.py
@@ -259,6 +259,41 @@ def test_validate_call_params_no_response_format():
     llm._validate_call_params()
 
 
+def test_openai_compatible_models_response_format():
+    """Test that OpenAI-compatible models support response_format without validation errors."""
+    from pydantic import BaseModel
+
+    class TestResponse(BaseModel):
+        content: str
+
+    llm = LLM(model="openai/qwen-plus", response_format=TestResponse)
+    llm._validate_call_params()
+
+    llm = LLM(model="openai/custom-model", response_format=TestResponse)
+    llm._validate_call_params()
+
+    llm = LLM(model="openai/gpt-4", response_format=TestResponse)
+    llm._validate_call_params()
+
+
+def test_non_openai_providers_still_use_validation():
+    """Test that non-OpenAI providers still use the original validation logic."""
+    from pydantic import BaseModel
+
+    class TestResponse(BaseModel):
+        content: str
+
+    with patch("crewai.llm.supports_response_schema", return_value=False):
+        llm = LLM(model="gemini/gemini-1.5-pro", response_format=TestResponse)
+        with pytest.raises(ValueError) as excinfo:
+            llm._validate_call_params()
+        assert "does not support response_format" in str(excinfo.value)
+
+    with patch("crewai.llm.supports_response_schema", return_value=True):
+        llm = LLM(model="anthropic/claude-3", response_format=TestResponse)
+        llm._validate_call_params()
+
+
 @pytest.mark.vcr(filter_headers=["authorization"], filter_query_parameters=["key"])
 @pytest.mark.parametrize(
     "model",