Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 16:18:30 +00:00
fix: keep stopwords updated
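
The tests added in this commit pin down one contract: assigning llm.stop (a list, a single string, or None) must leave llm.stop_sequences holding a normalized list, and reading llm.stop must reflect that same list. A minimal sketch of one way to implement such a pairing (hypothetical, not necessarily crewAI's actual code):

from typing import List, Optional, Union


class StopNormalizingLLM:
    """Toy stand-in for LLM; only models the stop/stop_sequences pairing."""

    def __init__(self) -> None:
        self.stop_sequences: List[str] = []

    @property
    def stop(self) -> List[str]:
        # Reading stop always reflects the normalized list.
        return self.stop_sequences

    @stop.setter
    def stop(self, value: Optional[Union[str, List[str]]]) -> None:
        # Normalize None -> [], str -> [str], list -> copy of list,
        # so the two attributes can never drift apart.
        if value is None:
            self.stop_sequences = []
        elif isinstance(value, str):
            self.stop_sequences = [value]
        else:
            self.stop_sequences = list(value)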
@@ -664,3 +664,37 @@ def test_anthropic_token_usage_tracking():
    assert usage["input_tokens"] == 50
    assert usage["output_tokens"] == 25
    assert usage["total_tokens"] == 75


def test_anthropic_stop_sequences_sync():
    """Test that stop and stop_sequences attributes stay synchronized."""
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Test setting stop as a list
    llm.stop = ["\nObservation:", "\nThought:"]
    assert llm.stop_sequences == ["\nObservation:", "\nThought:"]
    assert llm.stop == ["\nObservation:", "\nThought:"]

    # Test setting stop as a string
    llm.stop = "\nFinal Answer:"
    assert llm.stop_sequences == ["\nFinal Answer:"]
    assert llm.stop == ["\nFinal Answer:"]

    # Test setting stop as None
    llm.stop = None
    assert llm.stop_sequences == []
    assert llm.stop == []


@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
def test_anthropic_stop_sequences_sent_to_api():
    """Test that stop_sequences are properly sent to the Anthropic API."""
    llm = LLM(model="anthropic/claude-3-5-haiku-20241022")

    llm.stop = ["\nObservation:", "\nThought:"]

    result = llm.call("Say hello in one word")

    assert result is not None
    assert isinstance(result, str)
    assert len(result) > 0
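
For reference, stop_sequences is a first-class parameter on Anthropic's Messages API, so the VCR-recorded test above exercises a request roughly shaped like this (a sketch; the client setup and prompt are illustrative, not taken from the test suite):

from anthropic import Anthropic

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment
message = client.messages.create(
    model="claude-3-5-haiku-20241022",
    max_tokens=64,
    messages=[{"role": "user", "content": "Say hello in one word"}],
    # Generation halts if the model emits either of these strings.
    stop_sequences=["\nObservation:", "\nThought:"],
)
print(message.content[0].text)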
@@ -736,3 +736,56 @@ def test_bedrock_client_error_handling():
    with pytest.raises(RuntimeError) as exc_info:
        llm.call("Hello")
    assert "throttled" in str(exc_info.value).lower()


def test_bedrock_stop_sequences_sync():
    """Test that stop and stop_sequences attributes stay synchronized."""
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")

    # Test setting stop as a list
    llm.stop = ["\nObservation:", "\nThought:"]
    assert list(llm.stop_sequences) == ["\nObservation:", "\nThought:"]
    assert llm.stop == ["\nObservation:", "\nThought:"]

    # Test setting stop as a string
    llm.stop = "\nFinal Answer:"
    assert list(llm.stop_sequences) == ["\nFinal Answer:"]
    assert llm.stop == ["\nFinal Answer:"]

    # Test setting stop as None
    llm.stop = None
    assert list(llm.stop_sequences) == []
    assert llm.stop == []


def test_bedrock_stop_sequences_sent_to_api():
    """Test that stop_sequences are properly sent to the Bedrock API."""
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")

    # Set stop sequences via the stop attribute (simulating CrewAgentExecutor)
    llm.stop = ["\nObservation:", "\nThought:"]

    # Patch the API call to capture parameters without making a real call
    with patch.object(llm.client, 'converse') as mock_converse:
        mock_response = {
            'output': {
                'message': {
                    'role': 'assistant',
                    'content': [{'text': 'Hello'}]
                }
            },
            'usage': {
                'inputTokens': 10,
                'outputTokens': 5,
                'totalTokens': 15
            }
        }
        mock_converse.return_value = mock_response

        llm.call("Say hello in one word")

        # Verify stop_sequences were passed to the API in the inference config
        call_kwargs = mock_converse.call_args[1]
        assert "inferenceConfig" in call_kwargs
        assert "stopSequences" in call_kwargs["inferenceConfig"]
        assert call_kwargs["inferenceConfig"]["stopSequences"] == ["\nObservation:", "\nThought:"]
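
The Bedrock test asserts against boto3's Converse API shape, where stop sequences travel inside inferenceConfig. A sketch of the equivalent unmocked call (the region is an illustrative assumption; credentials come from the usual AWS configuration):

import boto3

client = boto3.client("bedrock-runtime", region_name="us-east-1")
response = client.converse(
    modelId="anthropic.claude-3-5-sonnet-20241022-v2:0",
    messages=[{"role": "user", "content": [{"text": "Say hello in one word"}]}],
    # stopSequences sits inside inferenceConfig, matching the assertions above.
    inferenceConfig={"stopSequences": ["\nObservation:", "\nThought:"]},
)
print(response["output"]["message"]["content"][0]["text"])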
@@ -648,3 +648,55 @@ def test_gemini_token_usage_tracking():
    assert usage["candidates_token_count"] == 25
    assert usage["total_token_count"] == 75
    assert usage["total_tokens"] == 75


def test_gemini_stop_sequences_sync():
    """Test that stop and stop_sequences attributes stay synchronized."""
    llm = LLM(model="google/gemini-2.0-flash-001")

    # Test setting stop as a list
    llm.stop = ["\nObservation:", "\nThought:"]
    assert llm.stop_sequences == ["\nObservation:", "\nThought:"]
    assert llm.stop == ["\nObservation:", "\nThought:"]

    # Test setting stop as a string
    llm.stop = "\nFinal Answer:"
    assert llm.stop_sequences == ["\nFinal Answer:"]
    assert llm.stop == ["\nFinal Answer:"]

    # Test setting stop as None
    llm.stop = None
    assert llm.stop_sequences == []
    assert llm.stop == []


def test_gemini_stop_sequences_sent_to_api():
    """Test that stop_sequences are properly sent to the Gemini API."""
    llm = LLM(model="google/gemini-2.0-flash-001")

    # Set stop sequences via the stop attribute (simulating CrewAgentExecutor)
    llm.stop = ["\nObservation:", "\nThought:"]

    # Patch the API call to capture parameters without making a real call
    with patch.object(llm.client.models, 'generate_content') as mock_generate:
        mock_response = MagicMock()
        mock_response.text = "Hello"
        mock_response.candidates = []
        mock_response.usage_metadata = MagicMock(
            prompt_token_count=10,
            candidates_token_count=5,
            total_token_count=15
        )
        mock_generate.return_value = mock_response

        llm.call("Say hello in one word")

        # Verify stop_sequences were passed to the API in the config
        call_kwargs = mock_generate.call_args[1]
        assert "config" in call_kwargs
        # The config object should have stop_sequences set
        config = call_kwargs["config"]
        # Check if the config has stop_sequences attribute
        assert hasattr(config, 'stop_sequences') or 'stop_sequences' in config.__dict__
        if hasattr(config, 'stop_sequences'):
            assert config.stop_sequences == ["\nObservation:", "\nThought:"]
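
The Gemini test inspects the config keyword, which in the google-genai SDK is a GenerateContentConfig carrying stop_sequences. A sketch of the unmocked call (client setup and prompt are illustrative):

from google import genai
from google.genai import types

client = genai.Client()  # reads GEMINI_API_KEY / GOOGLE_API_KEY from the environment
response = client.models.generate_content(
    model="gemini-2.0-flash-001",
    contents="Say hello in one word",
    # stop_sequences rides on the config object the test inspects.
    config=types.GenerateContentConfig(
        stop_sequences=["\nObservation:", "\nThought:"],
    ),
)
print(response.text)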