fix: convert tool_calls to string when no available_functions provided

Fixes #4036 - Models with native function calling (such as those served
via Ollama) fail when the model returns tool_calls but no
available_functions are provided.

Previously, when a model returned tool_calls without text content and no
available_functions were provided, the raw tool_calls list was returned.
This list would then cause a "'list' object has no attribute 'rstrip'"
error when passed to format_message_for_llm().

Now, tool_calls are converted to a human-readable string representation
containing the tool name and arguments. This allows the agent to see what
tool the model wanted to call and handle it through text-based parsing.
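
For illustration (payload invented for this example, mirroring the new
tests rather than the issue report), a response whose message carries a
single tool call with function name "get_weather" and arguments
'{"location": "San Francisco"}' now comes back from call() as:

    Tool: get_weather
    Arguments: {"location": "San Francisco"}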

Changes:
- Modified _handle_non_streaming_response to convert tool_calls to string
- Modified _ahandle_non_streaming_response with the same fix
- Added warning log when this conversion occurs
- Added tests covering the fix for both sync and async paths

Co-Authored-By: João <joao@crewai.com>
Author: Devin AI
Date:   2025-12-05 20:55:10 +00:00
Parent: f2f994612c
Commit: 1c082b909f
2 changed files with 225 additions and 3 deletions


@@ -1197,9 +1197,36 @@ class LLM(BaseLLM):
             )
             return text_response
 
-        # --- 6) If there is no text response, no available functions, but there are tool calls, return the tool calls
+        # --- 6) If there is no text response, no available functions, but there are tool calls,
+        # convert tool calls to a string representation instead of returning raw list
         if tool_calls and not available_functions and not text_response:
-            return tool_calls
+            try:
+                formatted_calls = []
+                for call in tool_calls:
+                    fn = getattr(call, "function", None)
+                    name = getattr(fn, "name", None) if fn else None
+                    args = getattr(fn, "arguments", None) if fn else None
+                    formatted_calls.append(
+                        f"Tool: {name or 'unknown'}\n"
+                        f"Arguments: {args or '{}'}"
+                    )
+                text_response = "\n\n".join(formatted_calls)
+            except Exception:
+                text_response = str(tool_calls)
+            logging.warning(
+                "Model returned tool_calls but no available_functions were provided. "
+                "Returning a string representation of the tool calls."
+            )
+            self._handle_emit_call_events(
+                response=text_response,
+                call_type=LLMCallType.LLM_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=params["messages"],
+            )
+            return text_response
 
         # --- 7) Handle tool calls if present
         tool_result = self._handle_tool_call(
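
Outside the diff, the conversion logic reads as this minimal standalone
sketch (SimpleNamespace stands in for litellm's tool-call objects; all
names here are illustrative):

    from types import SimpleNamespace

    def format_tool_calls(tool_calls):
        # Best-effort formatting; falls back to str() on any error,
        # mirroring the try/except in the hunk above.
        try:
            formatted_calls = []
            for call in tool_calls:
                fn = getattr(call, "function", None)
                name = getattr(fn, "name", None) if fn else None
                args = getattr(fn, "arguments", None) if fn else None
                formatted_calls.append(
                    f"Tool: {name or 'unknown'}\n"
                    f"Arguments: {args or '{}'}"
                )
            return "\n\n".join(formatted_calls)
        except Exception:
            return str(tool_calls)

    call = SimpleNamespace(
        function=SimpleNamespace(
            name="get_weather",
            arguments='{"location": "San Francisco"}',
        )
    )
    print(format_tool_calls([call]))
    # Tool: get_weather
    # Arguments: {"location": "San Francisco"}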
@@ -1315,8 +1342,35 @@
             )
             return text_response
 
+        # Convert tool calls to string representation when no available_functions
         if tool_calls and not available_functions and not text_response:
-            return tool_calls
+            try:
+                formatted_calls = []
+                for call in tool_calls:
+                    fn = getattr(call, "function", None)
+                    name = getattr(fn, "name", None) if fn else None
+                    args = getattr(fn, "arguments", None) if fn else None
+                    formatted_calls.append(
+                        f"Tool: {name or 'unknown'}\n"
+                        f"Arguments: {args or '{}'}"
+                    )
+                text_response = "\n\n".join(formatted_calls)
+            except Exception:
+                text_response = str(tool_calls)
+            logging.warning(
+                "Model returned tool_calls but no available_functions were provided. "
+                "Returning a string representation of the tool calls."
+            )
+            self._handle_emit_call_events(
+                response=text_response,
+                call_type=LLMCallType.LLM_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=params["messages"],
+            )
+            return text_response
 
         tool_result = self._handle_tool_call(
             tool_calls, available_functions, from_task, from_agent
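
A minimal usage sketch of the async path covered by this hunk (the
import path and model id are illustrative assumptions, not taken from
this commit):

    import asyncio
    from crewai import LLM  # assumed import path

    async def main():
        llm = LLM(model="ollama/llama3.1")  # hypothetical local model id
        # No available_functions are passed, so with this fix a
        # tool-calling response is returned as a readable string
        # rather than a raw tool_calls list.
        result = await llm.acall("What's the weather in San Francisco?")
        assert isinstance(result, str)
        print(result)

    asyncio.run(main())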


@@ -877,3 +877,171 @@ def test_validate_model_in_constants():
         LLM._validate_model_in_constants("anthropic.claude-future-v1:0", "bedrock")
         is True
     )
+
+
+def test_tool_calls_without_available_functions_returns_string():
+    """Test that tool_calls without available_functions returns a string representation.
+
+    This tests the fix for GitHub issue #4036, where Ollama models with native
+    function calling would return a list of tool calls instead of a string,
+    causing a "'list' object has no attribute 'rstrip'" error.
+    """
+    llm = LLM(model="gpt-4o-mini", is_litellm=True)
+
+    with patch("litellm.completion") as mock_completion:
+        # Create a mock tool call object
+        mock_function = MagicMock()
+        mock_function.name = "get_weather"
+        mock_function.arguments = '{"location": "San Francisco"}'
+
+        mock_tool_call = MagicMock()
+        mock_tool_call.function = mock_function
+
+        # Create mock response with tool_calls but no content
+        mock_message = MagicMock()
+        mock_message.content = None  # No text content
+        mock_message.tool_calls = [mock_tool_call]
+
+        mock_choice = MagicMock()
+        mock_choice.message = mock_message
+
+        mock_response = MagicMock()
+        mock_response.choices = [mock_choice]
+        mock_response.usage = {
+            "prompt_tokens": 10,
+            "completion_tokens": 10,
+            "total_tokens": 20,
+        }
+        mock_completion.return_value = mock_response
+
+        # Call without available_functions - should return string, not list
+        result = llm.call("What's the weather in San Francisco?")
+
+        # Result should be a string, not a list
+        assert isinstance(result, str), f"Expected str, got {type(result).__name__}"
+        # The string should contain the tool name and arguments
+        assert "get_weather" in result
+        assert "San Francisco" in result
+
+
+def test_tool_calls_without_available_functions_multiple_calls():
+    """Test that multiple tool_calls without available_functions returns a formatted string."""
+    llm = LLM(model="gpt-4o-mini", is_litellm=True)
+
+    with patch("litellm.completion") as mock_completion:
+        # Create mock tool calls
+        mock_function1 = MagicMock()
+        mock_function1.name = "get_weather"
+        mock_function1.arguments = '{"location": "San Francisco"}'
+
+        mock_function2 = MagicMock()
+        mock_function2.name = "get_time"
+        mock_function2.arguments = '{"timezone": "PST"}'
+
+        mock_tool_call1 = MagicMock()
+        mock_tool_call1.function = mock_function1
+        mock_tool_call2 = MagicMock()
+        mock_tool_call2.function = mock_function2
+
+        # Create mock response with multiple tool_calls but no content
+        mock_message = MagicMock()
+        mock_message.content = None
+        mock_message.tool_calls = [mock_tool_call1, mock_tool_call2]
+
+        mock_choice = MagicMock()
+        mock_choice.message = mock_message
+
+        mock_response = MagicMock()
+        mock_response.choices = [mock_choice]
+        mock_response.usage = {
+            "prompt_tokens": 10,
+            "completion_tokens": 10,
+            "total_tokens": 20,
+        }
+        mock_completion.return_value = mock_response
+
+        result = llm.call("What's the weather and time?")
+
+        assert isinstance(result, str)
+        assert "get_weather" in result
+        assert "get_time" in result
+        assert "San Francisco" in result
+        assert "PST" in result
+
+
+def test_tool_calls_with_text_response_returns_text():
+    """Test that when both tool_calls and text content exist, the text is returned."""
+    llm = LLM(model="gpt-4o-mini", is_litellm=True)
+
+    with patch("litellm.completion") as mock_completion:
+        mock_function = MagicMock()
+        mock_function.name = "get_weather"
+        mock_function.arguments = '{"location": "San Francisco"}'
+
+        mock_tool_call = MagicMock()
+        mock_tool_call.function = mock_function
+
+        # Create mock response with both tool_calls AND text content
+        mock_message = MagicMock()
+        mock_message.content = "Here is the weather information"
+        mock_message.tool_calls = [mock_tool_call]
+
+        mock_choice = MagicMock()
+        mock_choice.message = mock_message
+
+        mock_response = MagicMock()
+        mock_response.choices = [mock_choice]
+        mock_response.usage = {
+            "prompt_tokens": 10,
+            "completion_tokens": 10,
+            "total_tokens": 20,
+        }
+        mock_completion.return_value = mock_response
+
+        result = llm.call("What's the weather?")
+
+        # When text content exists, it should be returned
+        assert isinstance(result, str)
+        assert result == "Here is the weather information"
+
+
+@pytest.mark.asyncio
+async def test_async_tool_calls_without_available_functions_returns_string():
+    """Test that async tool_calls without available_functions returns a string."""
+    llm = LLM(model="gpt-4o-mini", is_litellm=True)
+
+    with patch("litellm.acompletion") as mock_acompletion:
+        mock_function = MagicMock()
+        mock_function.name = "search_database"
+        mock_function.arguments = '{"query": "test"}'
+
+        mock_tool_call = MagicMock()
+        mock_tool_call.function = mock_function
+
+        mock_message = MagicMock()
+        mock_message.content = None
+        mock_message.tool_calls = [mock_tool_call]
+
+        mock_choice = MagicMock()
+        mock_choice.message = mock_message
+
+        mock_response = MagicMock()
+        mock_response.choices = [mock_choice]
+        mock_response.usage = {
+            "prompt_tokens": 10,
+            "completion_tokens": 10,
+            "total_tokens": 20,
+        }
+        mock_acompletion.return_value = mock_response
+
+        result = await llm.acall("Search for test")
+
+        assert isinstance(result, str)
+        assert "search_database" in result
+        assert "test" in result