Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-05-01 07:13:00 +00:00)

Compare commits: devin/1742...devin/1740 (5 commits)
| Author | SHA1 | Date |
|---|---|---|
| | c956588586 | |
| | e8d61d32db | |
| | 1e7292d0fa | |
| | b7c988b3ac | |
| | 6d4c591eda | |
@@ -92,9 +92,43 @@ def suppress_warnings():


 class LLM:
+    """
+    A wrapper class for language model interactions using litellm.
+
+    This class provides a unified interface for interacting with various language models
+    through litellm. It handles model configuration, context window sizing, and callback
+    management.
+
+    Args:
+        model (str): The identifier for the language model to use. Must be a valid model ID
+            with a provider prefix (e.g., 'openai/gpt-4'). Cannot be a numeric value without
+            a provider prefix.
+        timeout (Optional[Union[float, int]]): The timeout for API calls in seconds.
+        temperature (Optional[float]): Controls randomness in the model's output.
+        top_p (Optional[float]): Controls diversity via nucleus sampling.
+        n (Optional[int]): Number of completions to generate.
+        stop (Optional[Union[str, List[str]]]): Sequences where the model should stop generating.
+        max_completion_tokens (Optional[int]): Maximum number of tokens to generate.
+        max_tokens (Optional[int]): Alias for max_completion_tokens.
+        presence_penalty (Optional[float]): Penalizes repeated tokens.
+        frequency_penalty (Optional[float]): Penalizes frequent tokens.
+        logit_bias (Optional[Dict[int, float]]): Modifies likelihood of specific tokens.
+        response_format (Optional[Dict[str, Any]]): Specifies the format for the model's response.
+        seed (Optional[int]): Seed for deterministic outputs.
+        logprobs (Optional[bool]): Whether to return log probabilities.
+        top_logprobs (Optional[int]): Number of most likely tokens to return probabilities for.
+        base_url (Optional[str]): Base URL for API calls.
+        api_version (Optional[str]): API version to use.
+        api_key (Optional[str]): API key for authentication.
+        callbacks (List[Any]): List of callback functions.
+        **kwargs: Additional keyword arguments to pass to the model.
+
+    Raises:
+        ValueError: If the model ID is empty, whitespace, or a numeric value without a provider prefix.
+    """
     def __init__(
         self,
-        model: str,
+        model: Union[str, Any],
         timeout: Optional[Union[float, int]] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
@@ -115,6 +149,16 @@ class LLM:
         callbacks: List[Any] = [],
         **kwargs,
     ):
+        # Only validate model ID if it's not None and is a numeric value without a provider prefix
+        if model is not None and (
+            isinstance(model, (int, float)) or
+            (isinstance(model, str) and model.strip() and model.strip().isdigit())
+        ):
+            raise ValueError(
+                f"Invalid model ID: {model}. Model ID cannot be a numeric value without a provider prefix. "
+                "Please specify a valid model ID with a provider prefix, e.g., 'openai/gpt-4'."
+            )
+
         self.model = model
         self.timeout = timeout
         self.temperature = temperature
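Read on its own, the added guard only rejects identifiers that are bare numbers; anything with a provider prefix, or any non-digit character, is let through. A minimal standalone sketch of the same predicate (the function name and assertions below are illustrative, not part of crewAI's API):

```python
from typing import Any


def is_invalid_numeric_model_id(model: Any) -> bool:
    """Mirror of the added guard: True when the model ID is a bare number."""
    if model is None:
        return False
    if isinstance(model, (int, float)):
        return True
    return isinstance(model, str) and bool(model.strip()) and model.strip().isdigit()


assert is_invalid_numeric_model_id(3420)                 # rejected: bare int
assert is_invalid_numeric_model_id("3420")               # rejected: digits-only string
assert not is_invalid_numeric_model_id("openai/gpt-4")   # accepted: provider prefix
assert not is_invalid_numeric_model_id("gpt-3.5-turbo")  # accepted: not digits-only
```

Note that the string "3.14" would pass this check (`str.isdigit()` is False for it), which is presumably why the new tests exercise 3.14 only as a float.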
@@ -186,7 +230,10 @@ class LLM:

     def supports_function_calling(self) -> bool:
         try:
-            params = get_supported_openai_params(model=self.model)
+            # Handle None model case
+            if self.model is None:
+                return False
+            params = get_supported_openai_params(model=str(self.model))
             return "response_format" in params
         except Exception as e:
             logging.error(f"Failed to get supported params: {str(e)}")
@@ -194,7 +241,10 @@ class LLM:

     def supports_stop_words(self) -> bool:
         try:
-            params = get_supported_openai_params(model=self.model)
+            # Handle None model case
+            if self.model is None:
+                return False
+            params = get_supported_openai_params(model=str(self.model))
             return "stop" in params
         except Exception as e:
             logging.error(f"Failed to get supported params: {str(e)}")
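Both capability probes in these two hunks share one shape: return False when no model is set, otherwise ask litellm which OpenAI-style parameters the stringified model supports and test for the relevant key. A self-contained sketch of that shape, using a stubbed lookup so it runs without litellm (fake_supported_params and supports_param are illustrative stand-ins, not the real get_supported_openai_params):

```python
import logging
from typing import List, Optional


def fake_supported_params(model: str) -> List[str]:
    # Stand-in for litellm.get_supported_openai_params; real results vary per model.
    return ["stop", "response_format"] if model.startswith("openai/") else ["stop"]


def supports_param(model: Optional[str], param: str) -> bool:
    """Shared shape of supports_function_calling / supports_stop_words after the change."""
    try:
        if model is None:  # guard added by the diff: no model means no capabilities
            return False
        return param in fake_supported_params(str(model))
    except Exception as e:
        logging.error(f"Failed to get supported params: {str(e)}")
        return False


print(supports_param("openai/gpt-4", "response_format"))  # True with the stubbed table
print(supports_param(None, "stop"))                        # False: None short-circuits
```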
@@ -208,8 +258,10 @@ class LLM:
         self.context_window_size = int(
             DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
         )
+        # Ensure model is a string before calling startswith
+        model_str = str(self.model) if not isinstance(self.model, str) else self.model
         for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
-            if self.model.startswith(key):
+            if model_str.startswith(key):
                 self.context_window_size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
         return self.context_window_size
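The context-window computation starts from a default, then prefix-matches the model name against a table of known window sizes and keeps a usage ratio of whichever entry matches; the change here only coerces a non-string model to str before calling startswith. A small sketch with made-up table values (the real DEFAULT_CONTEXT_WINDOW_SIZE, CONTEXT_WINDOW_USAGE_RATIO, and LLM_CONTEXT_WINDOW_SIZES live in crewAI's constants and may differ):

```python
# Illustrative values only; the real table and ratio come from crewAI's constants module.
CONTEXT_WINDOW_USAGE_RATIO = 0.75
DEFAULT_CONTEXT_WINDOW_SIZE = 8192
LLM_CONTEXT_WINDOW_SIZES = {"gpt-4": 8192, "gpt-4o": 128_000}


def get_context_window_size(model) -> int:
    size = int(DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO)
    model_str = str(model) if not isinstance(model, str) else model  # coercion from the diff
    for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
        if model_str.startswith(key):
            size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
    return size


# "gpt-4o-mini" matches both "gpt-4" and "gpt-4o"; the last match wins -> int(128000 * 0.75) = 96000
print(get_context_window_size("gpt-4o-mini"))
```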
@@ -1,6 +1,6 @@
 import inspect
 from pathlib import Path
-from typing import Any, Callable, Dict, List, TypeVar, Union, cast
+from typing import Any, Callable, Dict, TypeVar, cast

 import yaml
 from dotenv import load_dotenv
@@ -116,33 +116,13 @@ def CrewBase(cls: T) -> T:
    def _map_agent_variables(
        self,
        agent_name: str,
-        agent_info: Union[Dict[str, Any], List[Dict[str, Any]]],
+        agent_info: Dict[str, Any],
        agents: Dict[str, Callable],
        llms: Dict[str, Callable],
        tool_functions: Dict[str, Callable],
        cache_handler_functions: Dict[str, Callable],
        callbacks: Dict[str, Callable],
    ) -> None:
-        """Maps agent variables from configuration to internal state.
-
-        Args:
-            agent_name: Name of the agent.
-            agent_info: Configuration as a dictionary or list of configurations.
-            agents: Dictionary of agent functions.
-            llms: Dictionary of LLM functions.
-            tool_functions: Dictionary of tool functions.
-            cache_handler_functions: Dictionary of cache handler functions.
-            callbacks: Dictionary of callback functions.
-
-        Raises:
-            ValueError: When an empty list is provided as agent_info.
-        """
-        # If agent_info is a list, use the first item as the configuration
-        if isinstance(agent_info, list):
-            if not agent_info:
-                raise ValueError(f"Empty agent configuration list for agent {agent_name}")
-            agent_info = agent_info[0]
-
        if llm := agent_info.get("llm"):
            try:
                self.agents_config[agent_name]["llm"] = llms[llm]()
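The list-handling lines in this hunk normalize the case where a YAML agent entry parses as a list of mappings rather than a single mapping: an empty list is an error, otherwise the first mapping wins and everything downstream keeps using plain dict access. A minimal standalone sketch of that normalization (the function name is illustrative, not crewAI's):

```python
from typing import Any, Dict, List, Union


def normalize_agent_info(
    agent_name: str, agent_info: Union[Dict[str, Any], List[Dict[str, Any]]]
) -> Dict[str, Any]:
    """Collapse a list-form YAML agent entry down to its first mapping."""
    if isinstance(agent_info, list):
        if not agent_info:
            raise ValueError(f"Empty agent configuration list for agent {agent_name}")
        agent_info = agent_info[0]
    return agent_info


print(normalize_agent_info("writer", [{"role": "Writer", "goal": "Write"}]))  # first mapping
print(normalize_agent_info("writer", {"role": "Writer", "goal": "Write"}))    # passed through
```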
@@ -1,115 +0,0 @@
-import os
-import sys
-import tempfile
-from pathlib import Path
-
-import pytest
-import yaml
-
-
-class TestYamlConfig:
-    """Tests for YAML configuration handling."""
-
-    def test_list_format_in_yaml(self):
-        """Test that list format in YAML is handled correctly."""
-        # Create a test YAML content with list format
-        yaml_content = """
-test_agent:
-  - name: test_agent
-    role: Test Agent
-    goal: Test Goal
-"""
-
-        # Parse the YAML content
-        data = yaml.safe_load(yaml_content)
-
-        # Get the agent_info which should be a list
-        agent_name = "test_agent"
-        agent_info = data[agent_name]
-
-        # Verify it's a list
-        assert isinstance(agent_info, list)
-
-        # Create a function that simulates the behavior of _map_agent_variables
-        # with our fix applied
-        def map_agent_variables(agent_name, agent_info):
-            # This is the fix we implemented
-            if isinstance(agent_info, list):
-                if not agent_info:
-                    raise ValueError(f"Empty agent configuration list for agent {agent_name}")
-                agent_info = agent_info[0]
-
-            # Try to access a dictionary method on agent_info
-            # This would fail with AttributeError if agent_info is still a list
-            value = agent_info.get("name")
-            return value
-
-        # Call the function - this would raise AttributeError before the fix
-        result = map_agent_variables(agent_name, agent_info)
-
-    def test_empty_list_in_yaml(self):
-        """Test that empty list in YAML raises appropriate error."""
-        # Create a test YAML content with empty list
-        yaml_content = """
-test_agent: []
-"""
-
-        # Parse the YAML content
-        data = yaml.safe_load(yaml_content)
-
-        # Get the agent_info which should be an empty list
-        agent_name = "test_agent"
-        agent_info = data[agent_name]
-
-        # Verify it's a list
-        assert isinstance(agent_info, list)
-        assert len(agent_info) == 0
-
-        # Create a function that simulates the behavior of _map_agent_variables
-        def map_agent_variables(agent_name, agent_info):
-            if isinstance(agent_info, list):
-                if not agent_info:
-                    raise ValueError(f"Empty agent configuration list for agent {agent_name}")
-                agent_info = agent_info[0]
-            return agent_info
-
-        # Call the function - should raise ValueError
-        with pytest.raises(ValueError, match=f"Empty agent configuration list for agent {agent_name}"):
-            map_agent_variables(agent_name, agent_info)
-
-    def test_multiple_items_in_list(self):
-        """Test that when multiple items are in the list, the first one is used."""
-        # Create a test YAML content with multiple items in the list
-        yaml_content = """
-test_agent:
-  - name: first_agent
-    role: First Agent
-    goal: First Goal
-  - name: second_agent
-    role: Second Agent
-    goal: Second Goal
-"""
-
-        # Parse the YAML content
-        data = yaml.safe_load(yaml_content)
-
-        # Get the agent_info which should be a list
-        agent_name = "test_agent"
-        agent_info = data[agent_name]
-
-        # Verify it's a list with multiple items
-        assert isinstance(agent_info, list)
-        assert len(agent_info) > 1
-
-        # Create a function that simulates the behavior of _map_agent_variables
-        def map_agent_variables(agent_name, agent_info):
-            if isinstance(agent_info, list):
-                if not agent_info:
-                    raise ValueError(f"Empty agent configuration list for agent {agent_name}")
-                agent_info = agent_info[0]
-            return agent_info.get("name")
-
-        # Call the function - should return name from the first item
-        result = map_agent_variables(agent_name, agent_info)
-
-        # Verify only the first item was used
-        assert result == "first_agent"
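For context on why the test module deleted above mattered: in YAML, a key whose children are introduced with `-` parses to a Python list, while plain `key: value` children parse to a dict, so an agents config written in the dashed style hands _map_agent_variables a list. A quick illustration using PyYAML (the agent and field names here are made up):

```python
import yaml

list_style = """
writer:
  - role: Writer
    goal: Draft the report
"""
dict_style = """
writer:
  role: Writer
  goal: Draft the report
"""

print(type(yaml.safe_load(list_style)["writer"]))  # <class 'list'>
print(type(yaml.safe_load(dict_style)["writer"]))  # <class 'dict'>
```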
tests/unit/test_llm.py — new file, 43 lines
@@ -0,0 +1,43 @@
+import pytest
+
+from crewai.llm import LLM
+
+
+@pytest.mark.parametrize(
+    "invalid_model,error_message",
+    [
+        (3420, "Invalid model ID: 3420. Model ID cannot be a numeric value without a provider prefix."),
+        ("3420", "Invalid model ID: 3420. Model ID cannot be a numeric value without a provider prefix."),
+        (3.14, "Invalid model ID: 3.14. Model ID cannot be a numeric value without a provider prefix."),
+    ],
+)
+def test_invalid_numeric_model_ids(invalid_model, error_message):
+    """Test that numeric model IDs are rejected."""
+    with pytest.raises(ValueError, match=error_message):
+        LLM(model=invalid_model)
+
+
+@pytest.mark.parametrize(
+    "valid_model",
+    [
+        "openai/gpt-4",
+        "gpt-3.5-turbo",
+        "anthropic/claude-2",
+    ],
+)
+def test_valid_model_ids(valid_model):
+    """Test that valid model IDs are accepted."""
+    llm = LLM(model=valid_model)
+    assert llm.model == valid_model
+
+
+def test_empty_model_id():
+    """Test that empty model IDs are rejected."""
+    with pytest.raises(ValueError, match="Invalid model ID: ''. Model ID cannot be empty or whitespace."):
+        LLM(model="")
+
+
+def test_whitespace_model_id():
+    """Test that whitespace model IDs are rejected."""
+    with pytest.raises(ValueError, match="Invalid model ID: ' '. Model ID cannot be empty or whitespace."):
+        LLM(model=" ")
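Assuming crewai is importable in the test environment, the new file should run under plain pytest (for example, `pytest tests/unit/test_llm.py -q`). Note that the `match` argument to `pytest.raises` is interpreted as a regular expression searched within the exception message, so the literal dots in these expected strings also happen to match themselves.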