Address PR review feedback: Add error handling, type validation, and edge case tests
Co-Authored-By: Joe Moura <joao@crewai.com>
@@ -112,4 +112,102 @@ model_list = [
]
```

## Error Handling and Troubleshooting

When working with multiple model configurations, you may encounter various issues. Here are some common problems and their solutions:

### Missing Required Parameters

**Problem**: Router initialization fails with an error about missing parameters.

**Solution**: Ensure each model configuration in `model_list` includes both `model_name` and `litellm_params` with the required `model` parameter:

```python
# Correct configuration
model_config = {
    "model_name": "gpt-4o-mini",  # Required
    "litellm_params": {
        "model": "gpt-4o-mini",  # Required
        "api_key": "your-api-key"
    }
}
```
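
To catch malformed entries before they reach the router, you can also validate the list up front. The `validate_model_list` helper below is illustrative, not part of crewAI:

```python
def validate_model_list(model_list):
    """Illustrative pre-flight check: fail fast with a clear message."""
    for i, entry in enumerate(model_list):
        if "model_name" not in entry:
            raise ValueError(f"model_list[{i}] is missing the required 'model_name'")
        params = entry.get("litellm_params")
        if not isinstance(params, dict) or "model" not in params:
            raise ValueError(
                f"model_list[{i}] needs 'litellm_params' with a required 'model' key"
            )

validate_model_list([model_config])  # passes for the configuration above
```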

### Invalid Routing Strategy

**Problem**: Error when specifying an unsupported routing strategy.

**Solution**: Use only the supported routing strategies:

```python
# Valid routing strategies
valid_strategies = [
    "simple-shuffle",
    "least-busy",
    "usage-based",
    "latency-based",
    "cost-based"
]
```
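
If the strategy comes from configuration or user input, rejecting unknown values up front gives a clearer error than a failed router initialization. A minimal sketch of such a guard, mirroring the check the library performs internally:

```python
def resolve_strategy(requested):
    """Illustrative guard: reject unknown strategies with an actionable message."""
    if requested not in valid_strategies:
        raise ValueError(
            f"Invalid routing strategy: {requested!r}. "
            f"Valid options are: {', '.join(valid_strategies)}"
        )
    return requested

routing_strategy = resolve_strategy("least-busy")
```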

### API Key Authentication Errors

**Problem**: Authentication errors when making API calls.

**Solution**: Verify that all API keys are valid and have the necessary permissions:

```python
# Check environment variables first
import os
assert os.environ.get("OPENAI_API_KEY"), "Should be set if using OpenAI models"

# Or explicitly provide the key in the configuration
model_list = [{
    "model_name": "gpt-4o-mini",
    "litellm_params": {
        "model": "gpt-4o-mini",
        "api_key": "valid-api-key-here"  # Ensure this is correct
    }
}]
```
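
To avoid hard-coding secrets, one way is to read the key from the environment when building the configuration (this sketch assumes the variable is set):

```python
import os

model_list = [{
    "model_name": "gpt-4o-mini",
    "litellm_params": {
        "model": "gpt-4o-mini",
        # Pull the secret from the environment instead of committing it to code
        "api_key": os.environ["OPENAI_API_KEY"],
    },
}]
```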

### Rate Limit Handling

**Problem**: Encountering rate limits with multiple models.

**Solution**: Configure rate limits and implement fallback mechanisms:

```python
model_list = [
    {
        "model_name": "primary-model",
        "litellm_params": {"model": "primary-model", "api_key": "key1"},
        "rpm": 100  # Requests per minute
    },
    {
        "model_name": "fallback-model",
        "litellm_params": {"model": "fallback-model", "api_key": "key2"}
    }
]

# Configure with fallback
llm = LLM(
    model="primary-model",
    model_list=model_list,
    routing_strategy="least-busy"  # Will route to the fallback when the primary is busy
)
```
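
With that configuration in place, requests made through the LLM go via the router. A sketch of a call (the message content is arbitrary):

```python
messages = [{"role": "user", "content": "Summarize the latest sprint review."}]
response = llm.call(messages)
print(response)
```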

### Debugging Router Issues

If you're experiencing issues with the router, you can enable verbose logging to get more information:

```python
import litellm
litellm.set_verbose = True

# Then initialize your LLM
llm = LLM(model="gpt-4o-mini", model_list=model_list)
```
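
Depending on your litellm version, verbose logging may instead be controlled through the `LITELLM_LOG` environment variable; newer releases deprecate `set_verbose` in favor of it:

```python
import os
os.environ["LITELLM_LOG"] = "DEBUG"  # replaces litellm.set_verbose on newer versions
```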

This feature leverages litellm's Router functionality under the hood, providing robust load balancing and fallback capabilities for your CrewAI agents. The implementation ensures predictability and consistency in model selection while maintaining security through proper API key management.

@@ -1,9 +1,10 @@
import os
import shutil
import subprocess
+from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union

-from pydantic import Field, InstanceOf, PrivateAttr, model_validator
+from pydantic import Field, InstanceOf, PrivateAttr, model_validator, field_validator

from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent

@@ -88,11 +89,18 @@ class Agent(BaseAgent):
    function_calling_llm: Optional[Any] = Field(
        description="Language model that will handle function calling for the agent.", default=None
    )
+    class RoutingStrategy(str, Enum):
+        SIMPLE_SHUFFLE = "simple-shuffle"
+        LEAST_BUSY = "least-busy"
+        USAGE_BASED = "usage-based"
+        LATENCY_BASED = "latency-based"
+        COST_BASED = "cost-based"
+
    model_list: Optional[List[Dict[str, Any]]] = Field(
        default=None, description="List of model configurations for routing between multiple models."
    )
-    routing_strategy: Optional[str] = Field(
-        default=None, description="Strategy for routing between multiple models (e.g., 'simple-shuffle', 'least-busy', 'usage-based', 'latency-based')."
+    routing_strategy: Optional[RoutingStrategy] = Field(
+        default=None, description="Strategy for routing between multiple models (e.g., 'simple-shuffle', 'least-busy', 'usage-based', 'latency-based', 'cost-based')."
    )
    system_template: Optional[str] = Field(
        default=None, description="System format for the agent."
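
With these fields in place, multi-model routing can be configured directly on an agent. A sketch of the intended usage, assuming a `model_list` like the one in the docs above (pydantic coerces the string into the `RoutingStrategy` enum):

```python
from crewai.agent import Agent

agent = Agent(
    role="Researcher",
    goal="Summarize new ML papers",
    backstory="An analyst focused on the ML literature.",
    model_list=model_list,          # deployment configs as shown in the docs above
    routing_strategy="least-busy",  # validated and coerced to RoutingStrategy.LEAST_BUSY
)
```
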
@@ -9,11 +9,15 @@ from typing import Any, Dict, List, Optional, Union
import litellm
from litellm import Router as LiteLLMRouter
from litellm import get_supported_openai_params
+from tenacity import retry, stop_after_attempt, wait_exponential

+from crewai.utilities.logger import Logger
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)

+logger = Logger(verbose=True)


class FilteredStream:
    def __init__(self, original_stream):

@@ -155,14 +159,34 @@ class LLM:
        """
        Initialize the litellm Router with the provided model_list and routing_strategy.
        """
-        router_kwargs = {}
-        if self.routing_strategy:
-            router_kwargs["routing_strategy"] = self.routing_strategy
+        try:
+            router_kwargs = {}
+            if self.routing_strategy:
+                valid_strategies = ["simple-shuffle", "least-busy", "usage-based", "latency-based", "cost-based"]
+                if self.routing_strategy not in valid_strategies:
+                    raise ValueError(f"Invalid routing strategy: {self.routing_strategy}. Valid options are: {', '.join(valid_strategies)}")
+                router_kwargs["routing_strategy"] = self.routing_strategy
+
+            self.router = LiteLLMRouter(
+                model_list=self.model_list,
+                **router_kwargs
+            )
+        except Exception as e:
+            logger.log("error", f"Failed to initialize router: {str(e)}")
+            raise RuntimeError(f"Router initialization failed: {str(e)}")
-        self.router = LiteLLMRouter(
-            model_list=self.model_list,
-            **router_kwargs
-        )

+    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
+    def _execute_router_call(self, params):
+        """
+        Execute a call to the router with retry logic for handling transient issues.
+
+        Args:
+            params: Parameters to pass to the router completion method.
+
+        Returns:
+            The response from the router.
+        """
+        return self.router.completion(model=self.model, **params)

    def call(self, messages: List[Dict[str, str]], callbacks: List[Any] = []) -> str:
        with suppress_warnings():

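With this tenacity policy, a failed router call is retried up to three times, waiting an exponentially growing interval clamped between 4 and 10 seconds between attempts, which smooths over transient failures such as rate limits or brief network errors.
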
@@ -193,10 +217,7 @@ class LLM:
            params = {k: v for k, v in params.items() if v is not None}

            if self.router:
-                response = self.router.completion(
-                    model=self.model,
-                    **params
-                )
+                response = self._execute_router_call(params)
            else:
                params.update({
                    "model": self.model,

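Because the routed call now goes through `_execute_router_call`, every request made via the router automatically inherits the retry policy defined above.
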
@@ -1,10 +1,8 @@
import pytest
from unittest.mock import patch, MagicMock

from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.llm import LLM
from crewai.agent import Agent
from crewai.utilities.token_counter_callback import TokenCalcHandler


@pytest.mark.vcr(filter_headers=["authorization"])

@@ -175,3 +173,74 @@ def test_llm_call_without_router(mock_completion):
    mock_completion.assert_called_once()
    assert response == "Test response"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_with_invalid_routing_strategy():
    """Test that LLM initialization raises an error with an invalid routing strategy."""
    model_list = [
        {
            "model_name": "gpt-4o-mini",
            "litellm_params": {
                "model": "gpt-4o-mini",
                "api_key": "test-key-1"
            }
        }
    ]

    with pytest.raises(RuntimeError) as exc_info:
        LLM(
            model="gpt-4o-mini",
            model_list=model_list,
            routing_strategy="invalid-strategy"
        )

    assert "Invalid routing strategy" in str(exc_info.value)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_invalid_routing_strategy():
    """Test that Agent initialization raises an error with an invalid routing strategy."""
    model_list = [
        {
            "model_name": "gpt-4o-mini",
            "litellm_params": {
                "model": "gpt-4o-mini",
                "api_key": "test-key-1"
            }
        }
    ]

    with pytest.raises(Exception) as exc_info:
        Agent(
            role="test",
            goal="test",
            backstory="test",
            model_list=model_list,
            routing_strategy="invalid-strategy"
        )

    assert "Input should be" in str(exc_info.value)
    assert "simple-shuffle" in str(exc_info.value)
    assert "least-busy" in str(exc_info.value)


@pytest.mark.vcr(filter_headers=["authorization"])
@patch.object(LLM, '_initialize_router')
def test_llm_with_missing_model_in_litellm_params(mock_initialize_router):
    """Test that LLM initialization raises an error when model is missing in litellm_params."""
    mock_initialize_router.side_effect = RuntimeError("Router initialization failed: Missing required 'model' in litellm_params")

    model_list = [
        {
            "model_name": "gpt-4o-mini",
            "litellm_params": {
                "api_key": "test-key-1"
            }
        }
    ]

    with pytest.raises(RuntimeError) as exc_info:
        LLM(model="gpt-4o-mini", model_list=model_list)

    assert "Router initialization failed" in str(exc_info.value)