mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-11 00:58:30 +00:00
fix: conditionally set stop sequences based on LLM support
- Fix issue #3317 where CrewAI injects stop sequences, causing Bedrock GPT-OSS 400 errors
- Only set stop sequences when the LLM supports them, via the use_stop_words flag
- Preserve existing stop sequences for unsupported models
- Add comprehensive tests covering the fix
- Add a reproduction script to verify the issue and the fix

Co-Authored-By: João <joao@crewai.com>
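The core of the fix is a capability check: the executor only injects its stop sequences (e.g. "\nObservation:") when the model's provider actually accepts a stop parameter. A minimal sketch of what that check plausibly looks like, based on the litellm helper that the new tests patch (crewai.llm.get_supported_openai_params); the standalone function below is illustrative and is not the actual LLM.supports_stop_words() implementation:

from litellm import get_supported_openai_params


def supports_stop_words(model: str) -> bool:
    # Ask litellm which OpenAI-style parameters this model/provider accepts,
    # and only advertise stop-word support when 'stop' is among them.
    supported = get_supported_openai_params(model=model) or []
    return "stop" in supported

In the tests below this behavior is mocked: for the Bedrock GPT-OSS model the parameter list omits 'stop', so the check returns False and the executor leaves llm.stop alone; for gpt-4 the list includes 'stop' and the usual stop sequences are set.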
reproduce_issue_3317.py (new file, 65 lines)
@@ -0,0 +1,65 @@
#!/usr/bin/env python3
"""
Reproduction script for issue #3317
CrewAI injects stop sequences causing Bedrock GPT-OSS 400 errors
"""

import os
from crewai import Agent, Task, Crew
from crewai.llm import LLM

def test_bedrock_stop_sequence_issue():
    """
    Reproduce the issue where CrewAI automatically injects stop sequences
    that cause Bedrock models to fail with 'stopSequences not supported' error.
    """
    print("Testing Bedrock stop sequence issue...")

    llm = LLM(
        model="bedrock/converse/openai.gpt-oss-20b-1:0",
        litellm_params={
            "aws_region_name": "us-east-1",
            "drop_params": True
        }
    )

    print(f"Model supports stop words: {llm.supports_stop_words()}")

    agent = Agent(
        role="Test Agent",
        goal="Test the stop sequence issue",
        backstory="A test agent to reproduce the issue",
        llm=llm,
        verbose=True
    )

    task = Task(
        description="Say hello",
        expected_output="A simple greeting",
        agent=agent
    )

    crew = Crew(
        agents=[agent],
        tasks=[task],
        verbose=True
    )

    try:
        result = crew.kickoff()
        print("SUCCESS: No error occurred")
        print(f"Result: {result}")
        return True
    except Exception as e:
        error_msg = str(e)
        print(f"ERROR: {error_msg}")

        if "stopSequences not supported" in error_msg or "Unsupported parameter" in error_msg and "'stop'" in error_msg:
            print("CONFIRMED: This is the expected stop sequence error from issue #3317")
            return False
        else:
            print("UNEXPECTED: This is a different error")
            raise e

if __name__ == "__main__":
    test_bedrock_stop_sequence_issue()
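Running the script with python reproduce_issue_3317.py (assuming AWS credentials and Bedrock access to the GPT-OSS model in us-east-1 are configured) should surface the 400 "stopSequences not supported" error on unpatched code and complete successfully once the fix is applied.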
@@ -88,13 +88,16 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             tool.name: tool for tool in self.tools
         }
         existing_stop = self.llm.stop or []
-        self.llm.stop = list(
-            set(
-                existing_stop + self.stop
-                if isinstance(existing_stop, list)
-                else self.stop
-            )
-        )
+        if self.use_stop_words:
+            self.llm.stop = list(
+                set(
+                    existing_stop + self.stop
+                    if isinstance(existing_stop, list)
+                    else self.stop
+                )
+            )
+        else:
+            self.llm.stop = existing_stop if isinstance(existing_stop, list) else []
 
     def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
         if "system" in self.prompt:
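The new branch tests self.use_stop_words, which this hunk does not show being assigned. Judging from the tests below, which build the executor around a mocked LLM and assert that the flag tracks llm.supports_stop_words(), the constructor presumably derives it roughly as follows (a sketch under that assumption, not the actual __init__ code):

# Assumed wiring inside CrewAgentExecutor.__init__ (illustrative, not part of this diff):
# the executor asks the LLM whether its provider accepts stop sequences at all.
self.use_stop_words = self.llm.supports_stop_words()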
tests/test_bedrock_stop_sequence_fix.py (new file, 192 lines)
@@ -0,0 +1,192 @@
import pytest
from unittest.mock import Mock, patch
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.llm import LLM
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.tools_handler import ToolsHandler


class TestBedrockStopSequenceFix:
    """Test cases for issue #3317 - Bedrock stop sequence fix"""

    def test_stop_sequences_set_when_supported(self):
        """Test that stop sequences are set when LLM supports them"""
        mock_llm = Mock(spec=LLM)
        mock_llm.supports_stop_words.return_value = True
        mock_llm.stop = []

        mock_agent = Mock(spec=BaseAgent)
        mock_task = Mock()
        mock_crew = Mock()
        mock_tools_handler = Mock(spec=ToolsHandler)

        stop_words = ["\nObservation:", "\nThought:"]
        executor = CrewAgentExecutor(
            llm=mock_llm,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=stop_words,
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor.use_stop_words is True
        assert mock_llm.stop == stop_words

    def test_stop_sequences_not_set_when_unsupported(self):
        """Test that stop sequences are not set when LLM doesn't support them"""
        mock_llm = Mock(spec=LLM)
        mock_llm.supports_stop_words.return_value = False
        mock_llm.stop = []

        mock_agent = Mock(spec=BaseAgent)
        mock_task = Mock()
        mock_crew = Mock()
        mock_tools_handler = Mock(spec=ToolsHandler)

        stop_words = ["\nObservation:", "\nThought:"]
        executor = CrewAgentExecutor(
            llm=mock_llm,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=stop_words,
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor.use_stop_words is False
        assert mock_llm.stop == []

    def test_existing_stop_sequences_preserved_when_supported(self):
        """Test that existing stop sequences are preserved when adding new ones"""
        mock_llm = Mock(spec=LLM)
        mock_llm.supports_stop_words.return_value = True
        mock_llm.stop = ["existing_stop"]

        mock_agent = Mock(spec=BaseAgent)
        mock_task = Mock()
        mock_crew = Mock()
        mock_tools_handler = Mock(spec=ToolsHandler)

        stop_words = ["\nObservation:"]
        executor = CrewAgentExecutor(
            llm=mock_llm,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=stop_words,
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor.use_stop_words is True
        assert "existing_stop" in mock_llm.stop
        assert "\nObservation:" in mock_llm.stop

    def test_existing_stop_sequences_preserved_when_unsupported(self):
        """Test that existing stop sequences are preserved when LLM doesn't support new ones"""
        mock_llm = Mock(spec=LLM)
        mock_llm.supports_stop_words.return_value = False
        mock_llm.stop = ["existing_stop"]

        mock_agent = Mock(spec=BaseAgent)
        mock_task = Mock()
        mock_crew = Mock()
        mock_tools_handler = Mock(spec=ToolsHandler)

        stop_words = ["\nObservation:"]
        executor = CrewAgentExecutor(
            llm=mock_llm,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=stop_words,
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor.use_stop_words is False
        assert mock_llm.stop == ["existing_stop"]

    @patch('crewai.llm.get_supported_openai_params')
    def test_bedrock_model_stop_words_support(self, mock_get_params):
        """Test that Bedrock models correctly report stop word support"""
        mock_get_params.return_value = ['model', 'messages', 'temperature']  # No 'stop'

        llm = LLM(model="bedrock/converse/openai.gpt-oss-20b-1:0")

        assert llm.supports_stop_words() is False

    @patch('crewai.llm.get_supported_openai_params')
    def test_openai_model_stop_words_support(self, mock_get_params):
        """Test that OpenAI models correctly report stop word support"""
        mock_get_params.return_value = ['model', 'messages', 'temperature', 'stop']

        llm = LLM(model="gpt-4")

        assert llm.supports_stop_words() is True

    def test_use_stop_words_flag_consistency(self):
        """Test that use_stop_words flag is consistent with LLM support"""
        mock_llm_supporting = Mock(spec=LLM)
        mock_llm_supporting.supports_stop_words.return_value = True
        mock_llm_supporting.stop = []

        mock_agent = Mock(spec=BaseAgent)
        mock_task = Mock()
        mock_crew = Mock()
        mock_tools_handler = Mock(spec=ToolsHandler)

        executor_supporting = CrewAgentExecutor(
            llm=mock_llm_supporting,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=["\nObservation:"],
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor_supporting.use_stop_words is True

        mock_llm_non_supporting = Mock(spec=LLM)
        mock_llm_non_supporting.supports_stop_words.return_value = False
        mock_llm_non_supporting.stop = []

        executor_non_supporting = CrewAgentExecutor(
            llm=mock_llm_non_supporting,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=["\nObservation:"],
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor_non_supporting.use_stop_words is False
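The tests rely only on pytest and unittest.mock, so they should run in isolation with something like pytest tests/test_bedrock_stop_sequence_fix.py -v once the project's development dependencies are installed.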