Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-10 00:28:31 +00:00)
Address PR review feedback: Move model list to class constant, optimize methods, add docstrings, enhance tests
Co-Authored-By: Joe Moura <joao@crewai.com>
@@ -92,6 +92,8 @@ def suppress_warnings():
 
 
 class LLM:
+    MODELS_WITHOUT_STOP_SUPPORT = ["o3", "o3-mini", "o4-mini"]
+
     def __init__(
         self,
         model: str,
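For context on the new constant: the check that consumes it is a plain prefix match, so dated variants of the listed model names are covered as well. A minimal standalone sketch of that idea (the helper name below is invented for illustration and is not part of the crewAI codebase):

# Illustrative only: mirrors the prefix-matching idea behind MODELS_WITHOUT_STOP_SUPPORT.
MODELS_WITHOUT_STOP_SUPPORT = ["o3", "o3-mini", "o4-mini"]

def lacks_stop_support(model: str) -> bool:
    # startswith-based matching also catches versioned names such as "o3-mini-2025-01-31".
    return any(model.startswith(prefix) for prefix in MODELS_WITHOUT_STOP_SUPPORT)

assert lacks_stop_support("o3")
assert lacks_stop_support("o3-mini-2025-01-31")
assert not lacks_stop_support("gpt-4")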
@@ -193,11 +195,18 @@ class LLM:
             return False
 
     def supports_stop_words(self) -> bool:
-        models_without_stop_support = ["o3", "o3-mini", "o4-mini"]
-        for model in models_without_stop_support:
-            if self.model.startswith(model):
-                return False
+        """
+        Determines whether the current model supports the 'stop' parameter.
+
+        This method checks if the model is in the list of models known not to support
+        stop words, and if not, it queries the litellm library to determine if the
+        model supports the 'stop' parameter.
+
+        Returns:
+            bool: True if the model supports stop words, False otherwise.
+        """
+        if any(self.model.startswith(model) for model in self.MODELS_WITHOUT_STOP_SUPPORT):
+            return False
 
         try:
             params = get_supported_openai_params(model=self.model)
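The practical effect of this method is that request-building code can gate the 'stop' parameter on it before calling litellm, which is exactly what the test hunk below exercises for an o3 model. A rough caller-side sketch of that pattern; the helper name is hypothetical and not part of crewAI's API:

# Hypothetical helper: shows how supports_stop_words() can gate the 'stop'
# kwarg when assembling completion parameters (names here are illustrative).
def build_completion_kwargs(llm, messages):
    kwargs = {"model": llm.model, "messages": messages}
    if getattr(llm, "stop", None) and llm.supports_stop_words():
        kwargs["stop"] = llm.stop
    return kwargs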
@@ -30,31 +30,39 @@ def test_llm_callback_replacement():
     assert usage_metrics_1 == calc_handler_1.token_cost_process.get_summary()
 
 
-def test_supports_stop_words_for_o3_model():
-    """Test that supports_stop_words returns False for o3 model."""
-    llm = LLM(model="o3")
-    assert not llm.supports_stop_words()
-
-
-def test_supports_stop_words_for_o4_mini_model():
-    """Test that supports_stop_words returns False for o4-mini model."""
-    llm = LLM(model="o4-mini")
-    assert not llm.supports_stop_words()
-
-
-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_llm_call_excludes_stop_parameter_for_unsupported_models(monkeypatch):
-    """Test that the LLM.call method excludes the stop parameter for models that don't support it."""
-    def mock_completion(**kwargs):
-        assert 'stop' not in kwargs, "Stop parameter should be excluded for o3 model"
-        return {"choices": [{"message": {"content": "Hello, World!"}}]}
-
-    monkeypatch.setattr("litellm.completion", mock_completion)
-
-    llm = LLM(model="o3")
-    llm.stop = ["STOP"]
-
-    messages = [{"role": "user", "content": "Say 'Hello, World!'"}]
-    response = llm.call(messages)
-
-    assert response == "Hello, World!"
+class TestLLMStopWords:
+    """Tests for LLM stop words functionality."""
+
+    def test_supports_stop_words_for_o3_model(self):
+        """Test that supports_stop_words returns False for o3 model."""
+        llm = LLM(model="o3")
+        assert not llm.supports_stop_words()
+
+    def test_supports_stop_words_for_o4_mini_model(self):
+        """Test that supports_stop_words returns False for o4-mini model."""
+        llm = LLM(model="o4-mini")
+        assert not llm.supports_stop_words()
+
+    def test_supports_stop_words_for_supported_model(self):
+        """Test that supports_stop_words returns True for models that support stop words."""
+        llm = LLM(model="gpt-4")
+        assert llm.supports_stop_words()
+
+    @pytest.mark.vcr(filter_headers=["authorization"])
+    def test_llm_call_excludes_stop_parameter_for_unsupported_models(self, monkeypatch):
+        """Test that the LLM.call method excludes the stop parameter for models that don't support it."""
+        def mock_completion(**kwargs):
+            assert 'stop' not in kwargs, "Stop parameter should be excluded for o3 model"
+            assert 'model' in kwargs, "Model parameter should be included"
+            assert 'messages' in kwargs, "Messages parameter should be included"
+            return {"choices": [{"message": {"content": "Hello, World!"}}]}
+
+        monkeypatch.setattr("litellm.completion", mock_completion)
+
+        llm = LLM(model="o3")
+        llm.stop = ["STOP"]
+
+        messages = [{"role": "user", "content": "Say 'Hello, World!'"}]
+        response = llm.call(messages)
+
+        assert response == "Hello, World!"
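Because the production check matches by prefix, the new test class could later be extended with a case for dated model names. A hypothetical example, not part of this commit, written as a standalone test that would live in the same test module (where LLM is already imported):

# Hypothetical extra test: because the comparison uses startswith, dated
# variants of the listed models are treated the same way as the base names.
def test_supports_stop_words_for_versioned_o3_model():
    llm = LLM(model="o3-mini-2025-01-31")
    assert not llm.supports_stop_words()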