Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-04-30 14:52:36 +00:00

Compare commits: devin/1739...devin/1746 (2 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 8476fb2c64 | |
| | 8f3162b8e8 | |
docs/multiple_model_config.md (new file, 213 lines)
@@ -0,0 +1,213 @@
# Multiple Model Configuration in CrewAI

CrewAI now supports configuring multiple language models, each with its own API key and configuration. This feature allows you to:

1. Load-balance requests across multiple model deployments
2. Set up fallback models to handle rate limits or errors
3. Configure a routing strategy that controls how a model is selected
4. Maintain fine-grained control over model selection and usage

## Basic Usage

You can configure multiple models at the agent level:

```python
from crewai import Agent

# Define model configurations
model_list = [
    {
        "model_name": "gpt-4o-mini",
        "litellm_params": {
            "model": "gpt-4o-mini",  # Required: model name must be specified here
            "api_key": "your-openai-api-key-1"
        }
    },
    {
        "model_name": "gpt-3.5-turbo",
        "litellm_params": {
            "model": "gpt-3.5-turbo",  # Required: model name must be specified here
            "api_key": "your-openai-api-key-2"
        }
    },
    {
        "model_name": "claude-3-sonnet-20240229",
        "litellm_params": {
            "model": "claude-3-sonnet-20240229",  # Required: model name must be specified here
            "api_key": "your-anthropic-api-key"
        }
    }
]

# Create an agent with multiple model configurations
agent = Agent(
    role="Data Analyst",
    goal="Analyze the data and provide insights",
    backstory="You are an expert data analyst with years of experience.",
    model_list=model_list,
    routing_strategy="simple-shuffle"  # Optional routing strategy
)
```

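An agent configured this way is used like any other. A minimal sketch of running it inside a crew follows; the `Task` and `Crew` wiring here is illustrative and not specific to this feature:

```python
from crewai import Crew, Task

# Illustrative task; adjust description and expected output to your use case.
task = Task(
    description="Summarize the quarterly sales figures.",
    expected_output="A short summary with key trends.",
    agent=agent,
)

crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()  # Each LLM call is routed across the configured deployments
print(result)
```
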
## Routing Strategies

CrewAI supports the following routing strategies for precise control over model selection:

- `simple-shuffle`: Randomly selects a model from the list
- `least-busy`: Routes to the model with the fewest ongoing requests
- `usage-based`: Routes based on token usage across models
- `latency-based`: Routes to the model with the lowest latency
- `cost-based`: Routes to the model with the lowest cost

Example with latency-based routing:

```python
agent = Agent(
    role="Data Analyst",
    goal="Analyze the data and provide insights",
    backstory="You are an expert data analyst with years of experience.",
    model_list=model_list,
    routing_strategy="latency-based"
)
```

## Direct LLM Configuration

You can also configure multiple models directly with the LLM class for more flexibility:

```python
from crewai import LLM

llm = LLM(
    model="gpt-4o-mini",
    model_list=model_list,
    routing_strategy="simple-shuffle"
)
```

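A configured `LLM` can then be called directly; `call` takes a list of chat messages and returns the response text:

```python
messages = [{"role": "user", "content": "Give me three insights about this dataset."}]
response = llm.call(messages)  # Routed through the configured model list
print(response)
```
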
## Advanced Configuration

For more advanced configurations, you can specify additional parameters for each model, such as per-model rate limits (`tpm`, `rpm`) and generation settings like `temperature`:

```python
model_list = [
    {
        "model_name": "gpt-4o-mini",
        "litellm_params": {
            "model": "gpt-4o-mini",  # Required: model name must be specified here
            "api_key": "your-openai-api-key-1",
            "temperature": 0.7
        },
        "tpm": 100000,  # Tokens per minute limit
        "rpm": 1000     # Requests per minute limit
    },
    {
        "model_name": "gpt-3.5-turbo",
        "litellm_params": {
            "model": "gpt-3.5-turbo",  # Required: model name must be specified here
            "api_key": "your-openai-api-key-2",
            "temperature": 0.5
        }
    }
]
```

## Error Handling and Troubleshooting

When working with multiple model configurations, you may encounter various issues. Here are some common problems and their solutions:

### Missing Required Parameters

**Problem**: Router initialization fails with an error about missing parameters.

**Solution**: Ensure each model configuration in `model_list` includes both `model_name` and `litellm_params` with the required `model` parameter:

```python
# Correct configuration
model_config = {
    "model_name": "gpt-4o-mini",  # Required
    "litellm_params": {
        "model": "gpt-4o-mini",  # Required
        "api_key": "your-api-key"
    }
}
```

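If you build `model_list` dynamically, a small pre-flight check can surface missing keys before router initialization does. The `validate_model_list` helper below is a hypothetical sketch, not part of the CrewAI API:

```python
def validate_model_list(model_list):
    """Raise ValueError if any entry is missing required keys (hypothetical helper)."""
    for i, entry in enumerate(model_list):
        if "model_name" not in entry:
            raise ValueError(f"Entry {i} is missing 'model_name'")
        params = entry.get("litellm_params")
        if not params or "model" not in params:
            raise ValueError(f"Entry {i} needs 'litellm_params' with a 'model' key")

validate_model_list(model_list)  # Call before constructing the LLM or Agent
```
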
### Invalid Routing Strategy

**Problem**: Error when specifying an unsupported routing strategy.

**Solution**: Use only the supported routing strategies:

```python
# Valid routing strategies
valid_strategies = [
    "simple-shuffle",
    "least-busy",
    "usage-based",
    "latency-based",
    "cost-based"
]
```

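An unsupported strategy surfaces as a `RuntimeError` when the `LLM` is constructed, so you can catch it explicitly. A sketch, where `"round-robin"` is just an example of an invalid value:

```python
from crewai import LLM

try:
    llm = LLM(
        model="gpt-4o-mini",
        model_list=model_list,
        routing_strategy="round-robin",  # Not a supported strategy
    )
except RuntimeError as e:
    print(f"Router setup failed: {e}")  # Message names the valid strategies
```
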
### API Key Authentication Errors

**Problem**: Authentication errors when making API calls.

**Solution**: Verify that all API keys are valid and have the necessary permissions:

```python
import os

# Check environment variables first
assert os.environ.get("OPENAI_API_KEY"), "OPENAI_API_KEY should be set if using OpenAI models"

# Or explicitly provide keys in the configuration
model_list = [{
    "model_name": "gpt-4o-mini",
    "litellm_params": {
        "model": "gpt-4o-mini",
        "api_key": "valid-api-key-here"  # Ensure this is correct
    }
}]
```

### Rate Limit Handling

**Problem**: Encountering rate limits with multiple models.

**Solution**: Configure rate limits and implement fallback mechanisms:

```python
model_list = [
    {
        "model_name": "primary-model",
        "litellm_params": {"model": "primary-model", "api_key": "key1"},
        "rpm": 100  # Requests per minute
    },
    {
        "model_name": "fallback-model",
        "litellm_params": {"model": "fallback-model", "api_key": "key2"}
    }
]

# Configure with fallback
llm = LLM(
    model="primary-model",
    model_list=model_list,
    routing_strategy="least-busy"  # Shifts traffic to the fallback while the primary is busy
)
```

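litellm's `Router` also supports explicit fallback chains via its `fallbacks` parameter. CrewAI's `LLM` wrapper shown above does not expose this directly, but if you need hard fallback ordering you could construct the router yourself. A sketch using litellm outside the wrapper, assuming the same `model_list` as above; check your litellm version's docs for the exact fallback semantics:

```python
from litellm import Router

# Explicit fallback: try "primary-model" first, then "fallback-model" on failure.
router = Router(
    model_list=model_list,
    fallbacks=[{"primary-model": ["fallback-model"]}],
)

response = router.completion(
    model="primary-model",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response["choices"][0]["message"]["content"])
```
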
### Debugging Router Issues

If you're experiencing issues with the router, you can enable verbose logging to get more information:

```python
import litellm
litellm.set_verbose = True

# Then initialize your LLM
llm = LLM(model="gpt-4o-mini", model_list=model_list)
```

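Depending on your installed litellm version, `set_verbose` may be deprecated; the environment-variable switch below is an alternative worth trying (an assumption about newer litellm releases, not something CrewAI requires):

```python
import os
os.environ["LITELLM_LOG"] = "DEBUG"  # Newer litellm releases read this instead of set_verbose
```
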
This feature is built on litellm's Router, which provides the load balancing and fallback behavior for your CrewAI agents. Routing strategies are validated up front, model selection follows the strategy you configure, and each deployment's API key stays scoped to its own entry in `model_list`.

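For reference, the wrapper's behavior is roughly equivalent to the following direct litellm usage (a sketch of what happens under the hood, not CrewAI API):

```python
from litellm import Router

router = Router(
    model_list=model_list,              # Same structure as shown above
    routing_strategy="simple-shuffle",  # Same strategy names as CrewAI accepts
)

# CrewAI's LLM.call delegates to router.completion when a model_list is set
response = router.completion(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response["choices"][0]["message"]["content"])
```
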
@@ -1,9 +1,10 @@
import os
import shutil
import subprocess
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union

from pydantic import Field, InstanceOf, PrivateAttr, model_validator
from pydantic import Field, InstanceOf, PrivateAttr, model_validator, field_validator

from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
@@ -86,7 +87,20 @@ class Agent(BaseAgent):
        description="Language model that will run the agent.", default=None
    )
    function_calling_llm: Optional[Any] = Field(
        description="Language model that will run the agent.", default=None
        description="Language model that will handle function calling for the agent.", default=None
    )
    class RoutingStrategy(str, Enum):
        SIMPLE_SHUFFLE = "simple-shuffle"
        LEAST_BUSY = "least-busy"
        USAGE_BASED = "usage-based"
        LATENCY_BASED = "latency-based"
        COST_BASED = "cost-based"

    model_list: Optional[List[Dict[str, Any]]] = Field(
        default=None, description="List of model configurations for routing between multiple models."
    )
    routing_strategy: Optional[RoutingStrategy] = Field(
        default=None, description="Strategy for routing between multiple models (e.g., 'simple-shuffle', 'least-busy', 'usage-based', 'latency-based', 'cost-based')."
    )
    system_template: Optional[str] = Field(
        default=None, description="System format for the agent."
@@ -148,10 +162,17 @@
        # Handle different cases for self.llm
        if isinstance(self.llm, str):
            # If it's a string, create an LLM instance
            self.llm = LLM(model=self.llm)
            self.llm = LLM(
                model=self.llm,
                model_list=self.model_list,
                routing_strategy=self.routing_strategy
            )
        elif isinstance(self.llm, LLM):
            # If it's already an LLM instance, keep it as is
            pass
            if self.model_list and not getattr(self.llm, "model_list", None):
                self.llm.model_list = self.model_list
                self.llm.routing_strategy = self.routing_strategy
                self.llm._initialize_router()
        elif self.llm is None:
            # Determine the model name from environment variables or use default
            model_name = (
@@ -159,7 +180,11 @@
                or os.environ.get("MODEL")
                or "gpt-4o-mini"
            )
            llm_params = {"model": model_name}
            llm_params = {
                "model": model_name,
                "model_list": self.model_list,
                "routing_strategy": self.routing_strategy
            }

            api_base = os.environ.get("OPENAI_API_BASE") or os.environ.get(
                "OPENAI_BASE_URL"
@@ -207,6 +232,8 @@
                "api_key": getattr(self.llm, "api_key", None),
                "base_url": getattr(self.llm, "base_url", None),
                "organization": getattr(self.llm, "organization", None),
                "model_list": self.model_list,
                "routing_strategy": self.routing_strategy,
            }
            # Remove None values to avoid passing unnecessary parameters
            llm_params = {k: v for k, v in llm_params.items() if v is not None}
@@ -1,9 +1,6 @@
from importlib.metadata import version as get_version
from typing import Optional

from typing import Union

from crewai.llm import LLM
import click

from crewai.cli.add_crew_to_flow import add_crew_to_flow
@@ -183,15 +180,8 @@ def reset_memories(
    default="gpt-4o-mini",
    help="LLM Model to run the tests on the Crew. For now, only OpenAI models are accepted.",
)
def test(n_iterations: int, model: Union[str, LLM]):
    """Test the crew and evaluate the results using either a model name or LLM instance.

    Args:
        n_iterations: The number of iterations to run the test.
        model: Either a model name string or an LLM instance to use for evaluating
            the performance of the agents. If a string is provided, it will be used
            to create an LLM instance.
    """
def test(n_iterations: int, model: str):
    """Test the crew and evaluate the results."""
    click.echo(f"Testing the crew for {n_iterations} iterations with model {model}")
    evaluate_crew(n_iterations, model)
@@ -18,9 +18,6 @@ from pydantic import (
)
from pydantic_core import PydanticCustomError

from typing import Union

from crewai.llm import LLM
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.cache import CacheHandler
@@ -1078,30 +1075,19 @@ class Crew(BaseModel):
    def test(
        self,
        n_iterations: int,
        openai_model_name: Optional[Union[str, LLM]] = None,
        openai_model_name: Optional[str] = None,
        inputs: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Test and evaluate the Crew with the given inputs for n iterations.

        Args:
            n_iterations: The number of iterations to run the test.
            openai_model_name: Either a model name string or an LLM instance to use for evaluating
                the performance of the agents. If a string is provided, it will be used to create
                an LLM instance.
            inputs: The inputs to use for the test.

        Raises:
            ValueError: If openai_model_name is not a string or LLM instance.
        """
        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
        test_crew = self.copy()

        self._test_execution_span = test_crew._telemetry.test_execution_span(
            test_crew,
            n_iterations,
            inputs,
            openai_model_name,
        )
        evaluator = CrewEvaluator(test_crew, openai_model_name)
            openai_model_name,  # type: ignore[arg-type]
        )  # type: ignore[arg-type]
        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]

        for i in range(1, n_iterations + 1):
            evaluator.set_iteration(i)
@@ -7,12 +7,17 @@ from contextlib import contextmanager
from typing import Any, Dict, List, Optional, Union

import litellm
from litellm import Router as LiteLLMRouter
from litellm import get_supported_openai_params
from tenacity import retry, stop_after_attempt, wait_exponential

from crewai.utilities.logger import Logger
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)

logger = Logger(verbose=True)


class FilteredStream:
    def __init__(self, original_stream):
@@ -113,6 +118,8 @@ class LLM:
        api_version: Optional[str] = None,
        api_key: Optional[str] = None,
        callbacks: List[Any] = [],
        model_list: Optional[List[Dict[str, Any]]] = None,
        routing_strategy: Optional[str] = None,
        **kwargs,
    ):
        self.model = model
@@ -136,11 +143,50 @@ class LLM:
        self.callbacks = callbacks
        self.context_window_size = 0
        self.kwargs = kwargs
        self.model_list = model_list
        self.routing_strategy = routing_strategy
        self.router = None

        litellm.drop_params = True
        litellm.set_verbose = False
        self.set_callbacks(callbacks)
        self.set_env_callbacks()

        if self.model_list:
            self._initialize_router()
    def _initialize_router(self):
        """
        Initialize the litellm Router with the provided model_list and routing_strategy.
        """
        try:
            router_kwargs = {}
            if self.routing_strategy:
                valid_strategies = ["simple-shuffle", "least-busy", "usage-based", "latency-based", "cost-based"]
                if self.routing_strategy not in valid_strategies:
                    raise ValueError(f"Invalid routing strategy: {self.routing_strategy}. Valid options are: {', '.join(valid_strategies)}")
                router_kwargs["routing_strategy"] = self.routing_strategy

            self.router = LiteLLMRouter(
                model_list=self.model_list,
                **router_kwargs
            )
        except Exception as e:
            logger.log("error", f"Failed to initialize router: {str(e)}")
            raise RuntimeError(f"Router initialization failed: {str(e)}")

    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
    def _execute_router_call(self, params):
        """
        Execute a call to the router with retry logic for handling transient issues.

        Args:
            params: Parameters to pass to the router completion method

        Returns:
            The response from the router
        """
        return self.router.completion(model=self.model, **params)

    def call(self, messages: List[Dict[str, str]], callbacks: List[Any] = []) -> str:
        with suppress_warnings():
@@ -149,7 +195,6 @@ class LLM:

        try:
            params = {
                "model": self.model,
                "messages": messages,
                "timeout": self.timeout,
                "temperature": self.temperature,
@@ -164,9 +209,6 @@ class LLM:
                "seed": self.seed,
                "logprobs": self.logprobs,
                "top_logprobs": self.top_logprobs,
                "api_base": self.base_url,
                "api_version": self.api_version,
                "api_key": self.api_key,
                "stream": False,
                **self.kwargs,
            }
@@ -174,7 +216,17 @@ class LLM:
            # Remove None values to avoid passing unnecessary parameters
            params = {k: v for k, v in params.items() if v is not None}

            response = litellm.completion(**params)
            if self.router:
                response = self._execute_router_call(params)
            else:
                params.update({
                    "model": self.model,
                    "api_base": self.base_url,
                    "api_version": self.api_version,
                    "api_key": self.api_key,
                })
                response = litellm.completion(**params)

            return response["choices"][0]["message"]["content"]
        except Exception as e:
            if not LLMContextLengthExceededException(
@@ -1,10 +1,6 @@
from typing import Union

from crewai.llm import LLM
from collections import defaultdict

from pydantic import BaseModel, Field
from crewai.utilities.logger import Logger
from rich.box import HEAVY_EDGE
from rich.console import Console
from rich.table import Table
@@ -27,7 +23,7 @@ class CrewEvaluator:

    Attributes:
        crew (Crew): The crew of agents to evaluate.
        openai_model_name (Union[str, LLM]): Either a model name string or an LLM instance to use for evaluating the performance of the agents.
        openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
        tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
        iteration (int): The current iteration of the evaluation.
    """
@@ -36,29 +32,10 @@ class CrewEvaluator:
    run_execution_times: defaultdict = defaultdict(list)
    iteration: int = 0

    def __init__(self, crew, openai_model_name: Union[str, LLM]):
        """Initialize the CrewEvaluator.

        Args:
            crew (Crew): The crew to evaluate
            openai_model_name (Union[str, LLM]): Either a model name string or an LLM instance
                to use for evaluation. If a string is provided, it will be used to create an
                LLM instance with default settings. If an LLM instance is provided, its settings
                (like temperature) will be preserved.

        Raises:
            ValueError: If openai_model_name is not a string or LLM instance.
        """
    def __init__(self, crew, openai_model_name: str):
        self.crew = crew
        if not isinstance(openai_model_name, (str, LLM)):
            raise ValueError(f"Invalid model type '{type(openai_model_name)}'. Expected str or LLM instance.")
        self.model_instance = openai_model_name if isinstance(openai_model_name, LLM) else LLM(model=openai_model_name)
        self.openai_model_name = openai_model_name
        self._telemetry = Telemetry()
        self._logger = Logger()
        self._logger.log(
            "info",
            f"Initializing CrewEvaluator with model: {openai_model_name if isinstance(openai_model_name, str) else openai_model_name.model}"
        )
        self._setup_for_evaluating()

    def _setup_for_evaluating(self) -> None:
@@ -74,7 +51,7 @@ class CrewEvaluator:
            ),
            backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
            verbose=False,
            llm=self.model_instance,
            llm=self.openai_model_name,
        )

    def _evaluation_task(
@@ -204,11 +181,7 @@ class CrewEvaluator:
            self.crew,
            evaluation_result.pydantic.quality,
            current_task._execution_time,
            self.model_instance.model,
        )
        self._logger.log(
            "info",
            f"Task evaluation completed with quality score: {evaluation_result.pydantic.quality}"
            self.openai_model_name,
        )
        self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
        self.run_execution_times[self.iteration].append(
@@ -10,7 +10,6 @@ import instructor
import pydantic_core
import pytest

from crewai.llm import LLM
from crewai.agent import Agent
from crewai.agents.cache import CacheHandler
from crewai.crew import Crew
@@ -301,35 +300,6 @@ def test_hierarchical_process():
    )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_test_with_custom_llm():
    """Test that Crew.test() works correctly with custom LLM instances."""
    task = Task(
        description="Test task",
        expected_output="Test output",
        agent=researcher,
    )
    custom_llm = LLM(model="gpt-4", temperature=0.5)
    crew = Crew(agents=[researcher], tasks=[task], process=Process.sequential)

    with mock.patch('crewai.crew.CrewEvaluator') as mock_evaluator:
        crew.test(n_iterations=1, openai_model_name=custom_llm)
        mock_evaluator.assert_called_once_with(mock.ANY, custom_llm)

@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_test_backward_compatibility():
    """Test that Crew.test() maintains backward compatibility with string model names."""
    task = Task(
        description="Test task",
        expected_output="Test output",
        agent=researcher,
    )
    crew = Crew(agents=[researcher], tasks=[task], process=Process.sequential)

    with mock.patch('crewai.crew.CrewEvaluator') as mock_evaluator:
        crew.test(n_iterations=1, openai_model_name="gpt-4")
        mock_evaluator.assert_called_once_with(mock.ANY, "gpt-4")

def test_manager_llm_requirement_for_hierarchical_process():
    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
@@ -1153,7 +1123,7 @@ def test_kickoff_for_each_empty_input():
    assert results == []


@pytest.mark.vcr(filter_headeruvs=["authorization"])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_invalid_input():
    """Tests if kickoff_for_each raises TypeError for invalid input types."""

@@ -3155,4 +3125,4 @@ def test_multimodal_agent_live_image_analysis():
    # Verify we got a meaningful response
    assert isinstance(result.raw, str)
    assert len(result.raw) > 100  # Expecting a detailed analysis
    assert "error" not in result.raw.lower()  # No error messages in response
    assert "error" not in result.raw.lower()  # No error messages in response
tests/multiple_model_config_test.py (new file, 246 lines)
@@ -0,0 +1,246 @@
import pytest
from unittest.mock import patch, MagicMock

from crewai.llm import LLM
from crewai.agent import Agent


@pytest.mark.vcr(filter_headers=["authorization"])
@patch("litellm.Router")
@patch.object(LLM, '_initialize_router')
def test_llm_with_model_list(mock_initialize_router, mock_router):
    """Test that LLM can be initialized with a model_list for multiple model configurations."""
    mock_initialize_router.return_value = None

    mock_router_instance = MagicMock()
    mock_router.return_value = mock_router_instance

    model_list = [
        {
            "model_name": "gpt-4o-mini",
            "litellm_params": {
                "model": "gpt-4o-mini",
                "api_key": "test-key-1"
            }
        },
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                "api_key": "test-key-2"
            }
        }
    ]

    llm = LLM(model="gpt-4o-mini", model_list=model_list)
    llm.router = mock_router_instance

    assert llm.model == "gpt-4o-mini"
    assert llm.model_list == model_list
    assert llm.router is not None

@pytest.mark.vcr(filter_headers=["authorization"])
@patch("litellm.Router")
@patch.object(LLM, '_initialize_router')
def test_llm_with_routing_strategy(mock_initialize_router, mock_router):
    """Test that LLM can be initialized with a routing strategy."""
    mock_initialize_router.return_value = None

    mock_router_instance = MagicMock()
    mock_router.return_value = mock_router_instance

    model_list = [
        {
            "model_name": "gpt-4o-mini",
            "litellm_params": {
                "model": "gpt-4o-mini",
                "api_key": "test-key-1"
            }
        },
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                "api_key": "test-key-2"
            }
        }
    ]

    llm = LLM(
        model="gpt-4o-mini",
        model_list=model_list,
        routing_strategy="simple-shuffle"
    )
    llm.router = mock_router_instance

    assert llm.routing_strategy == "simple-shuffle"
    assert llm.router is not None

@pytest.mark.vcr(filter_headers=["authorization"])
@patch("litellm.Router")
@patch.object(LLM, '_initialize_router')
def test_agent_with_model_list(mock_initialize_router, mock_router):
    """Test that Agent can be initialized with a model_list for multiple model configurations."""
    mock_initialize_router.return_value = None

    mock_router_instance = MagicMock()
    mock_router.return_value = mock_router_instance

    model_list = [
        {
            "model_name": "gpt-4o-mini",
            "litellm_params": {
                "model": "gpt-4o-mini",
                "api_key": "test-key-1"
            }
        },
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                "api_key": "test-key-2"
            }
        }
    ]

    with patch.object(Agent, 'post_init_setup', wraps=Agent.post_init_setup) as mock_post_init:
        agent = Agent(
            role="test",
            goal="test",
            backstory="test",
            model_list=model_list
        )

    agent.llm.router = mock_router_instance

    assert agent.model_list == model_list
    assert agent.llm.model_list == model_list
    assert agent.llm.router is not None

@pytest.mark.vcr(filter_headers=["authorization"])
@patch("litellm.Router")
@patch.object(LLM, '_initialize_router')
def test_llm_call_with_router(mock_initialize_router, mock_router):
    """Test that LLM.call uses the router when model_list is provided."""
    mock_initialize_router.return_value = None

    mock_router_instance = MagicMock()
    mock_router.return_value = mock_router_instance

    mock_response = {
        "choices": [{"message": {"content": "Test response"}}]
    }
    mock_router_instance.completion.return_value = mock_response

    model_list = [
        {
            "model_name": "gpt-4o-mini",
            "litellm_params": {
                "model": "gpt-4o-mini",
                "api_key": "test-key-1"
            }
        }
    ]

    # Create LLM with model_list
    llm = LLM(model="gpt-4o-mini", model_list=model_list)

    llm.router = mock_router_instance

    messages = [{"role": "user", "content": "Hello"}]
    response = llm.call(messages)

    mock_router_instance.completion.assert_called_once()
    assert response == "Test response"

@pytest.mark.vcr(filter_headers=["authorization"])
@patch("litellm.completion")
def test_llm_call_without_router(mock_completion):
    """Test that LLM.call uses litellm.completion when no model_list is provided."""
    mock_response = {
        "choices": [{"message": {"content": "Test response"}}]
    }
    mock_completion.return_value = mock_response

    llm = LLM(model="gpt-4o-mini")

    messages = [{"role": "user", "content": "Hello"}]
    response = llm.call(messages)

    mock_completion.assert_called_once()
    assert response == "Test response"

@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_with_invalid_routing_strategy():
    """Test that LLM initialization raises an error with an invalid routing strategy."""
    model_list = [
        {
            "model_name": "gpt-4o-mini",
            "litellm_params": {
                "model": "gpt-4o-mini",
                "api_key": "test-key-1"
            }
        }
    ]

    with pytest.raises(RuntimeError) as exc_info:
        LLM(
            model="gpt-4o-mini",
            model_list=model_list,
            routing_strategy="invalid-strategy"
        )

    assert "Invalid routing strategy" in str(exc_info.value)

@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_invalid_routing_strategy():
    """Test that Agent initialization raises an error with an invalid routing strategy."""
    model_list = [
        {
            "model_name": "gpt-4o-mini",
            "litellm_params": {
                "model": "gpt-4o-mini",
                "api_key": "test-key-1"
            }
        }
    ]

    with pytest.raises(Exception) as exc_info:
        Agent(
            role="test",
            goal="test",
            backstory="test",
            model_list=model_list,
            routing_strategy="invalid-strategy"
        )

    assert "Input should be" in str(exc_info.value)
    assert "simple-shuffle" in str(exc_info.value)
    assert "least-busy" in str(exc_info.value)

@pytest.mark.vcr(filter_headers=["authorization"])
@patch.object(LLM, '_initialize_router')
def test_llm_with_missing_model_in_litellm_params(mock_initialize_router):
    """Test that LLM initialization raises an error when model is missing in litellm_params."""
    mock_initialize_router.side_effect = RuntimeError("Router initialization failed: Missing required 'model' in litellm_params")

    model_list = [
        {
            "model_name": "gpt-4o-mini",
            "litellm_params": {
                "api_key": "test-key-1"
            }
        }
    ]

    with pytest.raises(RuntimeError) as exc_info:
        LLM(model="gpt-4o-mini", model_list=model_list)

    assert "Router initialization failed" in str(exc_info.value)

@@ -2,7 +2,6 @@ from unittest import mock

import pytest

from crewai.llm import LLM
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task
@@ -132,30 +131,6 @@ class TestCrewEvaluator:
        # Ensure the console prints the table
        console.assert_has_calls([mock.call(), mock.call().print(table())])

    def test_evaluator_with_custom_llm(self, crew_planner):
        """Test that CrewEvaluator correctly handles custom LLM instances."""
        custom_llm = LLM(model="gpt-4", temperature=0.5)
        evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
        assert evaluator.model_instance == custom_llm
        assert evaluator.model_instance.temperature == 0.5

    def test_evaluator_with_invalid_model_type(self, crew_planner):
        """Test that CrewEvaluator raises error for invalid model type."""
        with pytest.raises(ValueError, match="Invalid model type"):
            CrewEvaluator(crew_planner.crew, 123)

    def test_evaluator_preserves_model_settings(self, crew_planner):
        """Test that CrewEvaluator preserves model settings."""
        custom_llm = LLM(model="gpt-4", temperature=0.7)
        evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
        assert evaluator.model_instance.temperature == 0.7

    def test_evaluator_with_model_name(self, crew_planner):
        """Test that CrewEvaluator correctly handles string model names."""
        evaluator = CrewEvaluator(crew_planner.crew, "gpt-4")
        assert isinstance(evaluator.model_instance, LLM)
        assert evaluator.model_instance.model == "gpt-4"

    def test_evaluate(self, crew_planner):
        task_output = TaskOutput(
            description="Task 1", agent=str(crew_planner.crew.agents[0])