Fix #2541: Add support for multimodal content format in qwen2.5-vl model

Co-Authored-By: Joe Moura <joao@crewai.com>
This commit is contained in:
Devin AI
2025-04-09 03:06:24 +00:00
parent b992ee9d6b
commit cdd5ebfb1a
2 changed files with 50 additions and 2 deletions

View File

@@ -839,9 +839,13 @@ class LLM(BaseLLM):
# Validate message format first
for msg in messages:
-if not isinstance(msg, dict) or "role" not in msg or "content" not in msg:
+if not isinstance(msg, dict) or "role" not in msg:
 raise TypeError(
-"Invalid message format. Each message must be a dict with 'role' and 'content' keys"
+"Invalid message format. Each message must be a dict with 'role' key"
 )
+if "content" not in msg and msg["role"] != "system":
+raise TypeError(
+"Invalid message format. Each non-system message must have a 'content' key"
+)
# Handle O1 models specially
@@ -868,6 +872,19 @@ class LLM(BaseLLM):
messages.append({"role": "user", "content": "Please continue."})
return messages
+if "qwen" in self.model.lower():
+formatted_messages = []
+for msg in messages:
+if not isinstance(msg.get("content"), str):
+formatted_messages.append(msg)
+continue
+formatted_messages.append({
+"role": msg["role"],
+"content": [{"type": "text", "text": msg["content"]}]
+})
+return formatted_messages
# Handle Anthropic models
if not self.is_anthropic:
return messages