From 989ef138fc4fc90375640b62d689b0b46be280ea Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Fri, 18 Apr 2025 08:44:20 +0000
Subject: [PATCH] Update litellm dependency to v1.66.3 to fix #2640

Co-Authored-By: Joe Moura
---
 pyproject.toml               |  2 +-
 tests/litellm_update_test.py | 47 ++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+), 1 deletion(-)
 create mode 100644 tests/litellm_update_test.py

diff --git a/pyproject.toml b/pyproject.toml
index 7528a2ecc..65efbcd68 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,7 +11,7 @@ dependencies = [
     # Core Dependencies
     "pydantic>=2.4.2",
     "openai>=1.13.3",
-    "litellm==1.60.2",
+    "litellm==1.66.3",
     "instructor>=1.3.3",
     # Text Processing
     "pdfplumber>=0.11.4",
diff --git a/tests/litellm_update_test.py b/tests/litellm_update_test.py
new file mode 100644
index 000000000..902f0ffc2
--- /dev/null
+++ b/tests/litellm_update_test.py
@@ -0,0 +1,47 @@
+import pytest
+from unittest.mock import MagicMock, patch
+
+from crewai.llm import LLM
+
+
+def test_llm_call_with_litellm_1_66_3():
+    """Test that the LLM class works with litellm v1.66.3+"""
+    llm = LLM(
+        model="gpt-3.5-turbo",
+        temperature=0.7,
+        max_tokens=50,
+        stop=["STOP"],
+        presence_penalty=0.1,
+        frequency_penalty=0.1,
+    )
+    messages = [{"role": "user", "content": "Say 'Hello, World!' and then say STOP"}]
+
+    with patch("litellm.completion") as mocked_completion:
+        mock_message = MagicMock()
+        mock_message.content = "Hello, World! I won't say the stop word."
+        mock_choice = MagicMock()
+        mock_choice.message = mock_message
+        mock_response = MagicMock()
+        mock_response.choices = [mock_choice]
+        mock_response.usage = {
+            "prompt_tokens": 10,
+            "completion_tokens": 10,
+            "total_tokens": 20,
+        }
+
+        mocked_completion.return_value = mock_response
+
+        response = llm.call(messages)
+
+        mocked_completion.assert_called_once()
+
+        assert "Hello, World!" in response
+        assert "STOP" not in response
+
+        _, kwargs = mocked_completion.call_args
+        assert kwargs["model"] == "gpt-3.5-turbo"
+        assert kwargs["temperature"] == 0.7
+        assert kwargs["max_tokens"] == 50
+        assert kwargs["stop"] == ["STOP"]
+        assert kwargs["presence_penalty"] == 0.1
+        assert kwargs["frequency_penalty"] == 0.1