Fix segmentation fault in Python 3.11 tests by improving provider comparison and replacing logging with print

Co-Authored-By: Joe Moura <joao@crewai.com>
This commit is contained in:
Devin AI
2025-04-30 12:07:57 +00:00
parent d636593359
commit 0173a3ceaf

View File

@@ -1031,25 +1031,23 @@ class LLM(BaseLLM):
             return
         provider = self._get_custom_llm_provider()
-        provider_lower = provider.lower() if provider else ""
         # Check if we're bypassing validation for OpenRouter
-        is_openrouter_bypass = (
-            provider_lower == OPENROUTER_PROVIDER.lower() and self.force_structured_output
-        )
-        if is_openrouter_bypass:
-            logging.warning(
-                f"Forcing structured output for OpenRouter model {self.model}. "
-                "Please ensure the model supports the expected response format."
-            )
+        is_openrouter = provider and provider.lower() == OPENROUTER_PROVIDER.lower()
+        is_openrouter_bypass = is_openrouter and self.force_structured_output
         # Check if the model supports response schema
         is_schema_supported = supports_response_schema(
             model=self.model,
             custom_llm_provider=provider,
         )
+        if is_openrouter_bypass:
+            print(
+                f"Warning: Forcing structured output for OpenRouter model {self.model}. "
+                "Please ensure the model supports the expected response format."
+            )
         if not (is_schema_supported or is_openrouter_bypass):
             raise ValueError(
                 f"The model {self.model} does not support response_format for provider '{provider}'. "