From 49b8cc95ae6a76af37f39e9b85c1f85d4a3e222b Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Wed, 26 Mar 2025 12:11:06 +0000
Subject: [PATCH] fix: update LLMCallStartedEvent message type to support multimodal content (#2475)

fix: sort imports in test file to fix linting
fix: properly sort imports with ruff

Co-Authored-By: Joe Moura
---
 src/crewai/utilities/events/llm_events.py |  2 +-
 tests/test_multimodal_validation.py       | 46 +++++++++++++++++++++++
 2 files changed, 47 insertions(+), 1 deletion(-)
 create mode 100644 tests/test_multimodal_validation.py

diff --git a/src/crewai/utilities/events/llm_events.py b/src/crewai/utilities/events/llm_events.py
index 988b6f945..10a648e86 100644
--- a/src/crewai/utilities/events/llm_events.py
+++ b/src/crewai/utilities/events/llm_events.py
@@ -15,7 +15,7 @@ class LLMCallStartedEvent(CrewEvent):
     """Event emitted when a LLM call starts"""
 
     type: str = "llm_call_started"
-    messages: Union[str, List[Dict[str, str]]]
+    messages: Union[str, List[Dict[str, Any]]]
     tools: Optional[List[dict]] = None
     callbacks: Optional[List[Any]] = None
     available_functions: Optional[Dict[str, Any]] = None
diff --git a/tests/test_multimodal_validation.py b/tests/test_multimodal_validation.py
new file mode 100644
index 000000000..3b0817bf2
--- /dev/null
+++ b/tests/test_multimodal_validation.py
@@ -0,0 +1,46 @@
+import os
+
+import pytest
+
+from crewai import LLM, Agent, Crew, Task
+
+
+@pytest.mark.skip(reason="Only run manually with valid API keys")
+def test_multimodal_agent_with_image_url():
+    """
+    Test that a multimodal agent can process images without validation errors.
+    This test reproduces the scenario from issue #2475.
+    """
+    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+    if not OPENAI_API_KEY:
+        pytest.skip("OPENAI_API_KEY environment variable not set")
+
+    llm = LLM(
+        model="openai/gpt-4o",  # model with vision capabilities
+        api_key=OPENAI_API_KEY,
+        temperature=0.7
+    )
+
+    expert_analyst = Agent(
+        role="Visual Quality Inspector",
+        goal="Perform detailed quality analysis of product images",
+        backstory="Senior quality control expert with expertise in visual inspection",
+        llm=llm,
+        verbose=True,
+        allow_delegation=False,
+        multimodal=True
+    )
+
+    inspection_task = Task(
+        description="""
+        Analyze the product image at https://www.us.maguireshoes.com/collections/spring-25/products/lucena-black-boot with focus on:
+        1. Quality of materials
+        2. Manufacturing defects
+        3. Compliance with standards
+        Provide a detailed report highlighting any issues found.
+        """,
+        expected_output="A detailed report highlighting any issues found",
+        agent=expert_analyst
+    )
+
+    crew = Crew(agents=[expert_analyst], tasks=[inspection_task])
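
Note (illustration, not part of the patch): the one-line type change is needed because
multimodal requests send each message's "content" as a list of typed parts rather than a
plain string, which the old List[Dict[str, str]] annotation rejected during pydantic
validation. A minimal sketch of the payload shape, assuming the OpenAI-style image_url
part format; the URL, wording, and variable names below are hypothetical and only the
structure matters:

    from crewai.utilities.events.llm_events import LLMCallStartedEvent

    # A multimodal chat message: "content" is a list of parts, so the dict's values
    # are no longer all strings (one value is itself a list of dicts).
    multimodal_messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Inspect this product photo for defects."},
                {"type": "image_url", "image_url": {"url": "https://example.com/boot.jpg"}},
            ],
        }
    ]

    # Should validate after the change; under Dict[str, str] this shape raised a
    # pydantic ValidationError when the event was emitted.
    event = LLMCallStartedEvent(messages=multimodal_messages)

Widening the value type to Any keeps plain string messages and ordinary
{"role": ..., "content": "..."} messages valid while letting nested part lists through.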