improve: address PR feedback for OpenAI 1.78 support

- Add inline documentation for litellm version constraints
- Improve test implementation with pytest markers and constants
- Add clearer assertion messages for better error reporting

Co-Authored-By: Joe Moura <joao@crewai.com>
Devin AI
2025-05-27 12:15:00 +00:00
parent 8583a7a30d
commit 75b4e84457
2 changed files with 22 additions and 15 deletions


@@ -11,6 +11,8 @@ dependencies = [
     # Core Dependencies
     "pydantic>=2.4.2",
     "openai>=1.13.3",
+    # litellm: v1.68.0+ required for OpenAI 1.78 compatibility
+    # Upper bound <1.72.0 to ensure stability and prevent issues with breaking changes
     "litellm>=1.68.0,<1.72.0",
     "instructor>=1.3.3",
     # Text Processing
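
The pinned range can be sanity-checked in an environment before running the manual tests. A minimal sketch, assuming litellm and the packaging library are installed:

from importlib.metadata import version
from packaging.version import Version

# Confirm the installed litellm falls inside the constrained range
installed = Version(version("litellm"))
assert Version("1.68.0") <= installed < Version("1.72.0"), \
    f"litellm {installed} is outside the supported >=1.68.0,<1.72.0 range"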


@@ -4,20 +4,22 @@ import pytest
 from crewai import LLM, Agent, Crew, Task, TaskOutput
 
+TEST_IMAGES = {
+    "product_shoe": "https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244",
+    "sample_image": "https://example.com/sample-image.jpg"
+}
+
+@pytest.mark.requires_api_key
 @pytest.mark.skip(reason="Only run manually with valid API keys")
 def test_openai_178_compatibility_with_multimodal():
-    """
-    Test that CrewAI works with OpenAI 1.78.0 and multi-image input support.
-    This test verifies the fix for issue #2910.
-    """
-    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-    if not OPENAI_API_KEY:
-        pytest.skip("OPENAI_API_KEY environment variable not set")
+    """Test CrewAI compatibility with OpenAI 1.78.0 multi-image support."""
+    if not os.getenv("OPENAI_API_KEY"):
+        pytest.skip("Test requires OPENAI_API_KEY environment variable")
+
     llm = LLM(
         model="openai/gpt-4o",  # model with vision capabilities
-        api_key=OPENAI_API_KEY,
+        api_key=os.getenv("OPENAI_API_KEY"),
         temperature=0.7
     )
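
The new requires_api_key marker is custom, so pytest will warn about an unknown mark unless it is registered somewhere. The commit does not show where that happens; a hypothetical conftest.py sketch:

def pytest_configure(config):
    # Register the custom marker so pytest recognizes @pytest.mark.requires_api_key
    config.addinivalue_line(
        "markers",
        "requires_api_key: test needs a real provider API key; run manually",
    )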
@@ -34,11 +36,14 @@ def test_openai_178_compatibility_with_multimodal():
     analysis_task = Task(
         description="""
         Analyze these product images:
-        1. https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244
-        2. https://example.com/sample-image.jpg
+        1. {product_shoe}
+        2. {sample_image}
         Provide a comparative analysis focusing on design elements and quality indicators.
-        """,
+        """.format(
+            product_shoe=TEST_IMAGES["product_shoe"],
+            sample_image=TEST_IMAGES["sample_image"]
+        ),
         expected_output="A comparative analysis of the provided images",
         agent=visual_agent
     )
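
Multi-image input to the Chat Completions API is expressed as multiple image_url content parts in one user message. A rough sketch of the shape this task ultimately exercises; the exact payload CrewAI and litellm construct may differ:

# Rough sketch of an OpenAI-style multi-image chat message
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Compare these product images."},
            {"type": "image_url", "image_url": {"url": TEST_IMAGES["product_shoe"]}},
            {"type": "image_url", "image_url": {"url": TEST_IMAGES["sample_image"]}},
        ],
    }
]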
@@ -46,8 +51,8 @@ def test_openai_178_compatibility_with_multimodal():
     crew = Crew(agents=[visual_agent], tasks=[analysis_task])
     result = crew.kickoff()
-    assert result is not None
-    assert len(result.tasks_output) == 1
+    assert result is not None, "Crew execution returned None"
+    assert len(result.tasks_output) == 1, "Expected exactly one task output"
     task_output = result.tasks_output[0]
-    assert isinstance(task_output, TaskOutput)
-    assert len(task_output.raw) > 0
+    assert isinstance(task_output, TaskOutput), f"Expected TaskOutput, got {type(task_output)}"
+    assert len(task_output.raw) > 0, "Task output should contain content"
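
The added messages mainly help triage: pytest surfaces them alongside the failing expression. A trivial standalone illustration of the difference:

# With a message, the failure reads "AssertionError: Crew execution returned None"
# rather than a bare "assert None is not None".
result = None  # stand-in for a kickoff that returned nothing
try:
    assert result is not None, "Crew execution returned None"
except AssertionError as exc:
    print(exc)  # -> Crew execution returned None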