Compare commits

...

5 Commits

Author SHA1 Message Date
Devin AI
a5dd576517 fix: Improve model name validation and fix syntax errors
- Fix docstring placement and type hints
- Add proper model name validation with clear error messages
- Organize tests into a class and add edge cases

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-12 11:17:48 +00:00
Devin AI
bff64ae823 refactor: Improve model name validation
- Add better error messages with model name context
- Add type hints and docstrings
- Add constants for model name validation
- Organize tests into a class
- Add edge case tests

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-12 11:10:33 +00:00
Devin AI
34faf609f4 Fix: Remove models/ prefix causing LiteLLM provider recognition failure
- Adds validation to prevent models/ prefix in model names
- Adds tests for model name validation
- Ensures correct model name format for LiteLLM provider recognition

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-12 11:08:00 +00:00
Brandon Hancock (bhancock_ai)
47818f4f41 updating bedrock docs (#2088)
Co-authored-by: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com>
2025-02-10 12:48:12 -05:00
Brandon Hancock (bhancock_ai)
9b10fd47b0 incorporate Small update in memory.mdx, fixing Google AI parameters #2008 (#2087) 2025-02-10 12:17:41 -05:00
3 changed files with 120 additions and 2 deletions

View File

@@ -282,6 +282,19 @@ my_crew = Crew(
### Using Google AI embeddings
#### Prerequisites
Before using Google AI embeddings, ensure you have:
- Access to the Gemini API
- The necessary API keys and permissions
You will need to update your *pyproject.toml* dependencies:
```toml
dependencies = [
"google-generativeai>=0.8.4", #main version in January/2025 - crewai v.0.100.0 and crewai-tools 0.33.0
"crewai[tools]>=0.100.0,<1.0.0"
]
```
```python Code
from crewai import Crew, Agent, Task, Process
@@ -434,6 +447,38 @@ my_crew = Crew(
)
```
### Using Amazon Bedrock embeddings
```python Code
# Note: Ensure you have installed `boto3` for Bedrock embeddings to work.
import os
import boto3
from crewai import Crew, Agent, Task, Process
boto3_session = boto3.Session(
region_name=os.environ.get("AWS_REGION_NAME"),
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY")
)
my_crew = Crew(
agents=[...],
tasks=[...],
process=Process.sequential,
memory=True,
embedder={
"provider": "bedrock",
"config":{
"session": boto3_session,
"model": "amazon.titan-embed-text-v2:0",
"vector_dimension": 1024
}
},
verbose=True
)
```
### Adding Custom Embedding Function
```python Code

View File

@@ -117,6 +117,39 @@ def suppress_warnings():
class LLM:
"""LLM class for handling model interactions.
Args:
model: The model identifier; should not start with 'models/'.
Examples: 'gemini/gemini-1.5-pro', 'anthropic/claude-3'
timeout: Optional timeout for model calls
temperature: Optional temperature parameter
max_tokens: Optional maximum tokens for completion
max_completion_tokens: Optional maximum completion tokens
logprobs: Optional log probabilities
top_p: Optional nucleus sampling parameter
n: Optional number of completions
stop: Optional stop sequences
presence_penalty: Optional presence penalty
frequency_penalty: Optional frequency penalty
logit_bias: Optional token biasing
user: Optional user identifier
response_format: Optional response format configuration
seed: Optional random seed
tools: Optional list of tools
tool_choice: Optional tool choice configuration
api_base: Optional API base URL
api_key: Optional API key
api_version: Optional API version
base_url: Optional base URL
top_logprobs: Optional top log probabilities
callbacks: Optional list of callbacks
reasoning_effort: Optional reasoning effort level
Raises:
ValueError: If the model name starts with 'models/' or is empty
TypeError: If model is not a string
"""
def __init__(
self,
model: str,
@@ -142,6 +175,20 @@ class LLM:
reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
**kwargs,
):
# Constants for model name validation
INVALID_MODEL_PREFIX = "models/"
# Validate model name
if not isinstance(model, str):
raise TypeError("Model name must be a string")
if not model:
raise ValueError("Model name cannot be empty")
if model.startswith(INVALID_MODEL_PREFIX):
raise ValueError(
f'Invalid model name "{model}": Model names should not start with "{INVALID_MODEL_PREFIX}". '
'Use the provider prefix instead (e.g., "gemini/model-name").'
)
self.model = model
self.timeout = timeout
self.temperature = temperature

View File

@@ -252,6 +252,29 @@ def test_validate_call_params_no_response_format():
llm._validate_call_params()
class TestModelNameValidation:
    """Unit tests covering the LLM constructor's model-name validation rules."""

    def test_models_prefix_rejection(self):
        """A name carrying the 'models/' prefix must be rejected with ValueError."""
        with pytest.raises(ValueError, match="should not start with \"models/\""):
            LLM(model="models/gemini/gemini-1.5-pro")

    def test_valid_model_names(self):
        """Well-formed provider-prefixed names construct without raising."""
        accepted = [
            "gemini/gemini-1.5-pro",
            "anthropic/claude-3-opus-20240229-v1:0",
            "openai/gpt-4",
            "openai/gpt-4 turbo",  # an embedded space is allowed
        ]
        for name in accepted:
            LLM(model=name)

    def test_edge_cases(self):
        """Empty and non-string model names raise the documented exceptions."""
        with pytest.raises(ValueError, match="cannot be empty"):
            LLM(model="")
        with pytest.raises(TypeError, match="must be a string"):
            LLM(model=None)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_o3_mini_reasoning_effort_high():
llm = LLM(
@@ -324,13 +347,16 @@ def test_anthropic_model_detection():
("claude-instant", True),
("claude/v1", True),
("gpt-4", False),
("", False),
("anthropomorphic", False), # Should not match partial words
]
for model, expected in models:
llm = LLM(model=model)
assert llm.is_anthropic == expected, f"Failed for model: {model}"
assert llm._is_anthropic_model(model) == expected, f"Failed for model: {model}"
# Test empty model name separately since it raises ValueError
with pytest.raises(ValueError, match="cannot be empty"):
LLM(model="")
def test_anthropic_message_formatting(anthropic_llm, system_message, user_message):
"""Test Anthropic message formatting with fixtures."""