diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index 85277b38f..a80a9afd9 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -270,6 +270,7 @@ class LLM(BaseLLM):
         callbacks: List[Any] = [],
         reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
         stream: bool = False,
+        force_structured_output: bool = False,
         **kwargs,
     ):
         self.model = model
@@ -296,6 +297,7 @@ class LLM(BaseLLM):
         self.additional_params = kwargs
         self.is_anthropic = self._is_anthropic_model(model)
         self.stream = stream
+        self.force_structured_output = force_structured_output
 
         litellm.drop_params = True
 
@@ -992,9 +994,11 @@ class LLM(BaseLLM):
             - If no slash is present, "openai" is assumed.
         """
         provider = self._get_custom_llm_provider()
-        if self.response_format is not None and not supports_response_schema(
-            model=self.model,
-            custom_llm_provider=provider,
+        if self.response_format is not None and not (
+            supports_response_schema(
+                model=self.model,
+                custom_llm_provider=provider,
+            ) or (provider == "openrouter" and self.force_structured_output)
         ):
             raise ValueError(
                 f"The model {self.model} does not support response_format for provider '{provider}'. "
diff --git a/tests/llm_test.py b/tests/llm_test.py
index f80637c60..adea8da58 100644
--- a/tests/llm_test.py
+++ b/tests/llm_test.py
@@ -256,6 +256,32 @@ def test_validate_call_params_no_response_format():
     llm._validate_call_params()
 
 
+def test_validate_call_params_openrouter_force_structured_output():
+    class DummyResponse(BaseModel):
+        a: int
+
+    # Test with OpenRouter and force_structured_output=True
+    llm = LLM(
+        model="openrouter/deepseek/deepseek-chat",
+        response_format=DummyResponse,
+        force_structured_output=True
+    )
+    # Should not raise any error with force_structured_output=True
+    llm._validate_call_params()
+
+    # Test with OpenRouter and force_structured_output=False (default)
+    # Patch supports_response_schema to simulate an unsupported model.
+    with patch("crewai.llm.supports_response_schema", return_value=False):
+        llm = LLM(
+            model="openrouter/deepseek/deepseek-chat",
+            response_format=DummyResponse,
+            force_structured_output=False
+        )
+        with pytest.raises(ValueError) as excinfo:
+            llm._validate_call_params()
+        assert "does not support response_format" in str(excinfo.value)
+
+
 @pytest.mark.vcr(filter_headers=["authorization"], filter_query_parameters=["key"])
 @pytest.mark.parametrize(
     "model",
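
A minimal usage sketch of the new flag (not part of the patch): the model slug mirrors the one used in the tests, and the `Answer` schema is illustrative.

```python
from pydantic import BaseModel

from crewai import LLM


class Answer(BaseModel):
    # Illustrative response schema; any Pydantic model works as response_format.
    text: str


# With force_structured_output=True, _validate_call_params accepts the
# response_format for an OpenRouter model even when litellm's
# supports_response_schema reports no support, deferring schema
# enforcement to OpenRouter itself.
llm = LLM(
    model="openrouter/deepseek/deepseek-chat",
    response_format=Answer,
    force_structured_output=True,
)
llm._validate_call_params()  # passes; without the flag this may raise ValueError
```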