Compare commits


1 Commit

Author: Devin AI
SHA1: c216b310e3
Date: 2025-07-17 01:56:56 +00:00

Fix response_format validation for OpenAI-compatible models

- Allow OpenAI provider models to use response_format without validation errors
- Add comprehensive tests for OpenAI-compatible models like qwen-plus
- Ensure non-OpenAI providers still use the original validation logic
- Fixes issue #3174, where structured output failed for third-party OpenAI-compatible models

Co-Authored-By: João <joao@crewai.com>
2 changed files with 47 additions and 8 deletions
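In practical terms, the change lets an `LLM` configured with the `openai/` provider prefix request structured output even when litellm's capability table does not know the underlying model. Below is a minimal sketch of the usage the fix targets, mirroring the new tests further down; `ArticleSummary`, the `base_url`, and the `api_key` values are placeholders for whatever third-party OpenAI-compatible endpoint is actually in use, not anything taken from this PR.

```python
from pydantic import BaseModel

from crewai import LLM


class ArticleSummary(BaseModel):  # placeholder schema for the structured output
    title: str
    key_points: list[str]


# "openai/qwen-plus" routes through the OpenAI-compatible client. Before this fix,
# _validate_call_params() raised ValueError because litellm does not list qwen-plus
# as supporting response schemas, even if the serving endpoint accepts them.
llm = LLM(
    model="openai/qwen-plus",
    base_url="https://example.com/compatible-mode/v1",  # placeholder endpoint
    api_key="sk-placeholder",                           # placeholder key
    response_format=ArticleSummary,
)
llm._validate_call_params()  # passes now: provider "openai" skips the schema check
```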


@@ -1090,14 +1090,18 @@ class LLM(BaseLLM):
           - If no slash is present, "openai" is assumed.
         """
         provider = self._get_custom_llm_provider()
-        if self.response_format is not None and not supports_response_schema(
-            model=self.model,
-            custom_llm_provider=provider,
-        ):
-            raise ValueError(
-                f"The model {self.model} does not support response_format for provider '{provider}'. "
-                "Please remove response_format or use a supported model."
-            )
+        if self.response_format is not None:
+            if provider == "openai":
+                return
+
+            if not supports_response_schema(
+                model=self.model,
+                custom_llm_provider=provider,
+            ):
+                raise ValueError(
+                    f"The model {self.model} does not support response_format for provider '{provider}'. "
+                    "Please remove response_format or use a supported model."
+                )
 
     def supports_function_calling(self) -> bool:
         try:
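For reference, `supports_response_schema` in the retained branch is litellm's capability lookup (the new tests patch it as `crewai.llm.supports_response_schema`); after this change it is only consulted when the provider prefix is something other than `openai`. A minimal sketch of that lookup, assuming litellm is installed and exports the helper under this name; the model names are only examples:

```python
from litellm import supports_response_schema

# Capability check the validator still applies to non-"openai" providers.
# True means litellm's model metadata reports support for a JSON-schema
# response_format; results depend on the installed litellm version.
print(supports_response_schema(model="gemini/gemini-1.5-pro", custom_llm_provider="gemini"))
print(supports_response_schema(model="anthropic/claude-3", custom_llm_provider="anthropic"))
```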


@@ -259,6 +259,41 @@ def test_validate_call_params_no_response_format():
     llm._validate_call_params()
 
 
+def test_openai_compatible_models_response_format():
+    """Test that OpenAI-compatible models support response_format without validation errors."""
+    from pydantic import BaseModel
+
+    class TestResponse(BaseModel):
+        content: str
+
+    llm = LLM(model="openai/qwen-plus", response_format=TestResponse)
+    llm._validate_call_params()
+
+    llm = LLM(model="openai/custom-model", response_format=TestResponse)
+    llm._validate_call_params()
+
+    llm = LLM(model="openai/gpt-4", response_format=TestResponse)
+    llm._validate_call_params()
+
+
+def test_non_openai_providers_still_use_validation():
+    """Test that non-OpenAI providers still use the original validation logic."""
+    from pydantic import BaseModel
+
+    class TestResponse(BaseModel):
+        content: str
+
+    with patch("crewai.llm.supports_response_schema", return_value=False):
+        llm = LLM(model="gemini/gemini-1.5-pro", response_format=TestResponse)
+        with pytest.raises(ValueError) as excinfo:
+            llm._validate_call_params()
+        assert "does not support response_format" in str(excinfo.value)
+
+    with patch("crewai.llm.supports_response_schema", return_value=True):
+        llm = LLM(model="anthropic/claude-3", response_format=TestResponse)
+        llm._validate_call_params()
+
+
 @pytest.mark.vcr(filter_headers=["authorization"], filter_query_parameters=["key"])
 @pytest.mark.parametrize(
     "model",