Fix: Reject "models/" prefix in model names that breaks LiteLLM provider recognition

- Adds validation that rejects the "models/" prefix in model names
- Adds tests for model name validation
- Ensures the provider-prefixed model name format LiteLLM expects (see the sketch below)
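
A minimal sketch of the behavior this enables, assuming the public
"from crewai import LLM" import path (illustrative only, not part of the diff):

    from crewai import LLM

    # Gemini model IDs copied from Google's API docs (e.g. "models/gemini-1.5-pro")
    # carry no provider prefix, so LiteLLM cannot resolve a provider from them;
    # constructing an LLM with such a name now fails fast with a clear message.
    try:
        LLM(model="models/gemini-1.5-pro")
    except ValueError as exc:
        print(exc)

    # The provider-prefixed form LiteLLM expects is accepted as before.
    llm = LLM(model="gemini/gemini-1.5-pro")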

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date:   2025-02-12 11:08:00 +00:00
Commit: 34faf609f4
Parent: 47818f4f41

2 changed files with 18 additions and 0 deletions

@@ -142,6 +142,13 @@ class LLM:
         reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
         **kwargs,
     ):
+        # Validate model name: a bare "models/" prefix breaks LiteLLM provider detection
+        if isinstance(model, str) and model.startswith('models/'):
+            raise ValueError(
+                'Model name should not start with "models/". '
+                'Use the provider prefix instead (e.g., "gemini/model-name").'
+            )
+
         self.model = model
         self.timeout = timeout
         self.temperature = temperature

@@ -252,6 +252,17 @@ def test_validate_call_params_no_response_format():
     llm._validate_call_params()
 
 
+def test_model_name_validation():
+    """Test that model names with 'models/' prefix are rejected."""
+    with pytest.raises(ValueError, match="should not start with \"models/\""):
+        LLM(model="models/gemini/gemini-1.5-pro")
+
+    # Valid model names should work
+    LLM(model="gemini/gemini-1.5-pro")
+    LLM(model="anthropic/claude-3-opus-20240229-v1:0")
+    LLM(model="openai/gpt-4")
+
+
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_o3_mini_reasoning_effort_high():
     llm = LLM(