Mirror of https://github.com/crewAIInc/crewAI.git (synced 2025-12-16 04:18:35 +00:00)

Compare commits: 1.2.0...devin/1755 (2 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 6efdd9ab1c | |
| | a28680a868 | |
reproduce_issue_3317.py (new file, 64 lines)
@@ -0,0 +1,64 @@

```python
#!/usr/bin/env python3
"""
Reproduction script for issue #3317:
CrewAI injects stop sequences causing Bedrock GPT-OSS 400 errors.
"""

from crewai import Agent, Task, Crew
from crewai.llm import LLM


def test_bedrock_stop_sequence_issue():
    """
    Reproduce the issue where CrewAI automatically injects stop sequences
    that cause Bedrock models to fail with a 'stopSequences not supported' error.
    """
    print("Testing Bedrock stop sequence issue...")

    llm = LLM(
        model="bedrock/converse/openai.gpt-oss-20b-1:0",
        litellm_params={
            "aws_region_name": "us-east-1",
            "drop_params": True
        }
    )

    print(f"Model supports stop words: {llm.supports_stop_words()}")

    agent = Agent(
        role="Test Agent",
        goal="Test the stop sequence issue",
        backstory="A test agent to reproduce the issue",
        llm=llm,
        verbose=True
    )

    task = Task(
        description="Say hello",
        expected_output="A simple greeting",
        agent=agent
    )

    crew = Crew(
        agents=[agent],
        tasks=[task],
        verbose=True
    )

    try:
        result = crew.kickoff()
        print("SUCCESS: No error occurred")
        print(f"Result: {result}")
        return True
    except Exception as e:
        error_msg = str(e)
        print(f"ERROR: {error_msg}")

        if "stopSequences not supported" in error_msg or (
            "Unsupported parameter" in error_msg and "'stop'" in error_msg
        ):
            print("CONFIRMED: This is the expected stop sequence error from issue #3317")
            return False
        else:
            print("UNEXPECTED: This is a different error")
            raise e


if __name__ == "__main__":
    test_bedrock_stop_sequence_issue()
```
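The `llm.supports_stop_words()` call printed above is the capability check that the rest of this compare relies on. Judging from the tests further down (which patch `crewai.llm.get_supported_openai_params`), that check can be derived from litellm's per-model parameter metadata. The snippet below is a minimal standalone sketch of that idea under that assumption; `model_accepts_stop` is a hypothetical helper, not the crewAI implementation.

```python
# Illustrative sketch only: derive "does this model accept `stop`?" from litellm's
# parameter metadata, mirroring what the patched get_supported_openai_params tests imply.
from litellm import get_supported_openai_params


def model_accepts_stop(model: str) -> bool:
    """Return True if litellm reports that `model` supports a `stop` parameter."""
    supported = get_supported_openai_params(model=model) or []
    return "stop" in supported


if __name__ == "__main__":
    # Results depend on the installed litellm version's model metadata.
    print(model_accepts_stop("gpt-4"))
    print(model_accepts_stop("bedrock/converse/openai.gpt-oss-20b-1:0"))
```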
```diff
@@ -88,13 +88,16 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             tool.name: tool for tool in self.tools
         }
         existing_stop = self.llm.stop or []
-        self.llm.stop = list(
-            set(
-                existing_stop + self.stop
-                if isinstance(existing_stop, list)
-                else self.stop
-            )
-        )
+        if self.use_stop_words:
+            self.llm.stop = list(
+                set(
+                    existing_stop + self.stop
+                    if isinstance(existing_stop, list)
+                    else self.stop
+                )
+            )
+        else:
+            self.llm.stop = existing_stop if isinstance(existing_stop, list) else []
 
     def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
         if "system" in self.prompt:
```
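In isolation, the guard introduced above (in `crewai.agents.crew_agent_executor`, per the test imports below) behaves roughly like the following standalone sketch. `StubLLM` and `apply_agent_stop_words` are hypothetical stand-ins, not crewAI APIs; the real executor checks `self.use_stop_words`, which the tests below tie to `llm.supports_stop_words()`.

```python
# Standalone sketch of the guarded stop-word assignment above.
# StubLLM and apply_agent_stop_words are hypothetical stand-ins, not crewAI APIs.


class StubLLM:
    def __init__(self, stop=None, supports_stop=True):
        self.stop = stop if stop is not None else []
        self._supports_stop = supports_stop

    def supports_stop_words(self) -> bool:
        return self._supports_stop


def apply_agent_stop_words(llm: StubLLM, agent_stop: list[str]) -> list[str]:
    """Mirror the diff's logic: only inject agent stop words if the model accepts them."""
    existing_stop = llm.stop or []
    if llm.supports_stop_words():
        llm.stop = list(
            set(existing_stop + agent_stop if isinstance(existing_stop, list) else agent_stop)
        )
    else:
        llm.stop = existing_stop if isinstance(existing_stop, list) else []
    return llm.stop


# A model that accepts `stop` gets the agent's stop words merged in...
print(apply_agent_stop_words(StubLLM(stop=["END"]), ["\nObservation:"]))
# ...while a model that rejects `stop` (e.g. Bedrock GPT-OSS) keeps only what it already had.
print(apply_agent_stop_words(StubLLM(stop=["END"], supports_stop=False), ["\nObservation:"]))
```

The point of the `else` branch is that a model which rejects `stopSequences` never has CrewAI's ReAct stop words injected, while any stop words the caller configured explicitly are left untouched.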
tests/test_bedrock_stop_sequence_fix.py (new file, 191 lines)
@@ -0,0 +1,191 @@

```python
from unittest.mock import Mock, patch

from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.llm import LLM
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.tools_handler import ToolsHandler


class TestBedrockStopSequenceFix:
    """Test cases for issue #3317 - Bedrock stop sequence fix"""

    def test_stop_sequences_set_when_supported(self):
        """Test that stop sequences are set when the LLM supports them"""
        mock_llm = Mock(spec=LLM)
        mock_llm.supports_stop_words.return_value = True
        mock_llm.stop = []

        mock_agent = Mock(spec=BaseAgent)
        mock_task = Mock()
        mock_crew = Mock()
        mock_tools_handler = Mock(spec=ToolsHandler)

        stop_words = ["\nObservation:", "\nThought:"]
        executor = CrewAgentExecutor(
            llm=mock_llm,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=stop_words,
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor.use_stop_words is True
        assert mock_llm.stop == stop_words

    def test_stop_sequences_not_set_when_unsupported(self):
        """Test that stop sequences are not set when the LLM doesn't support them"""
        mock_llm = Mock(spec=LLM)
        mock_llm.supports_stop_words.return_value = False
        mock_llm.stop = []

        mock_agent = Mock(spec=BaseAgent)
        mock_task = Mock()
        mock_crew = Mock()
        mock_tools_handler = Mock(spec=ToolsHandler)

        stop_words = ["\nObservation:", "\nThought:"]
        executor = CrewAgentExecutor(
            llm=mock_llm,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=stop_words,
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor.use_stop_words is False
        assert mock_llm.stop == []

    def test_existing_stop_sequences_preserved_when_supported(self):
        """Test that existing stop sequences are preserved when adding new ones"""
        mock_llm = Mock(spec=LLM)
        mock_llm.supports_stop_words.return_value = True
        mock_llm.stop = ["existing_stop"]

        mock_agent = Mock(spec=BaseAgent)
        mock_task = Mock()
        mock_crew = Mock()
        mock_tools_handler = Mock(spec=ToolsHandler)

        stop_words = ["\nObservation:"]
        executor = CrewAgentExecutor(
            llm=mock_llm,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=stop_words,
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor.use_stop_words is True
        assert "existing_stop" in mock_llm.stop
        assert "\nObservation:" in mock_llm.stop

    def test_existing_stop_sequences_preserved_when_unsupported(self):
        """Test that existing stop sequences are preserved when the LLM doesn't support new ones"""
        mock_llm = Mock(spec=LLM)
        mock_llm.supports_stop_words.return_value = False
        mock_llm.stop = ["existing_stop"]

        mock_agent = Mock(spec=BaseAgent)
        mock_task = Mock()
        mock_crew = Mock()
        mock_tools_handler = Mock(spec=ToolsHandler)

        stop_words = ["\nObservation:"]
        executor = CrewAgentExecutor(
            llm=mock_llm,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=stop_words,
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor.use_stop_words is False
        assert mock_llm.stop == ["existing_stop"]

    @patch('crewai.llm.get_supported_openai_params')
    def test_bedrock_model_stop_words_support(self, mock_get_params):
        """Test that Bedrock models correctly report stop word support"""
        mock_get_params.return_value = ['model', 'messages', 'temperature']  # No 'stop'

        llm = LLM(model="bedrock/converse/openai.gpt-oss-20b-1:0")

        assert llm.supports_stop_words() is False

    @patch('crewai.llm.get_supported_openai_params')
    def test_openai_model_stop_words_support(self, mock_get_params):
        """Test that OpenAI models correctly report stop word support"""
        mock_get_params.return_value = ['model', 'messages', 'temperature', 'stop']

        llm = LLM(model="gpt-4")

        assert llm.supports_stop_words() is True

    def test_use_stop_words_flag_consistency(self):
        """Test that the use_stop_words flag is consistent with LLM support"""
        mock_llm_supporting = Mock(spec=LLM)
        mock_llm_supporting.supports_stop_words.return_value = True
        mock_llm_supporting.stop = []

        mock_agent = Mock(spec=BaseAgent)
        mock_task = Mock()
        mock_crew = Mock()
        mock_tools_handler = Mock(spec=ToolsHandler)

        executor_supporting = CrewAgentExecutor(
            llm=mock_llm_supporting,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=["\nObservation:"],
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor_supporting.use_stop_words is True

        mock_llm_non_supporting = Mock(spec=LLM)
        mock_llm_non_supporting.supports_stop_words.return_value = False
        mock_llm_non_supporting.stop = []

        executor_non_supporting = CrewAgentExecutor(
            llm=mock_llm_non_supporting,
            task=mock_task,
            crew=mock_crew,
            agent=mock_agent,
            prompt={"prompt": "test"},
            max_iter=5,
            tools=[],
            tools_names="",
            stop_words=["\nObservation:"],
            tools_description="",
            tools_handler=mock_tools_handler
        )

        assert executor_non_supporting.use_stop_words is False
```
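Assuming a development checkout with pytest installed, the new test module can presumably be run on its own, for example through pytest's Python API:

```python
# Run only the new test module; assumes pytest is available in the environment.
import pytest

if __name__ == "__main__":
    raise SystemExit(pytest.main(["-v", "tests/test_bedrock_stop_sequence_fix.py"]))
```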