more timeouts

Brandon Hancock
2025-01-02 11:08:51 -05:00
parent e7696f9b07
commit ab93fdd348
9 changed files with 378 additions and 185 deletions
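
The change itself is mechanical: a @pytest.mark.timeout(60) marker is added above each test in the parser and CLI suites so that a hung test fails after a minute instead of stalling the whole run. A minimal sketch of how the marker behaves, assuming the third-party pytest-timeout plugin is installed (the marker is not part of core pytest):

import time

import pytest


@pytest.mark.timeout(60)  # pytest-timeout aborts and fails the test after 60 seconds
def test_completes_within_limit():
    time.sleep(0.1)  # stand-in for real work, comfortably under the limit
    assert True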

View File

@@ -15,6 +15,7 @@ def parser():
return p
@pytest.mark.timeout(60)
def test_valid_action_parsing_special_characters(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: what's the temperature in SF?"
result = parser.parse(text)
@@ -23,6 +24,7 @@ def test_valid_action_parsing_special_characters(parser):
assert result.tool_input == "what's the temperature in SF?"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_json_tool_input(parser):
text = """
Thought: Let's find the information
@@ -36,6 +38,7 @@ def test_valid_action_parsing_with_json_tool_input(parser):
assert result.tool_input == expected_tool_input
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_quotes(parser):
text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "temperature in SF"'
result = parser.parse(text)
@@ -44,6 +47,7 @@ def test_valid_action_parsing_with_quotes(parser):
assert result.tool_input == "temperature in SF"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_curly_braces(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: {temperature in SF}"
result = parser.parse(text)
@@ -52,6 +56,7 @@ def test_valid_action_parsing_with_curly_braces(parser):
assert result.tool_input == "{temperature in SF}"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_angle_brackets(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: <temperature in SF>"
result = parser.parse(text)
@@ -60,6 +65,7 @@ def test_valid_action_parsing_with_angle_brackets(parser):
assert result.tool_input == "<temperature in SF>"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_parentheses(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: (temperature in SF)"
result = parser.parse(text)
@@ -68,6 +74,7 @@ def test_valid_action_parsing_with_parentheses(parser):
assert result.tool_input == "(temperature in SF)"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_mixed_brackets(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: [temperature in {SF}]"
result = parser.parse(text)
@@ -76,6 +83,7 @@ def test_valid_action_parsing_with_mixed_brackets(parser):
assert result.tool_input == "[temperature in {SF}]"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_nested_quotes(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what's the temperature in 'SF'?\""
result = parser.parse(text)
@@ -84,6 +92,7 @@ def test_valid_action_parsing_with_nested_quotes(parser):
assert result.tool_input == "what's the temperature in 'SF'?"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_incomplete_json(parser):
text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: {"query": "temperature in SF"'
result = parser.parse(text)
@@ -92,6 +101,7 @@ def test_valid_action_parsing_with_incomplete_json(parser):
assert result.tool_input == '{"query": "temperature in SF"}'
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_special_characters(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is the temperature in SF? @$%^&*"
result = parser.parse(text)
@@ -100,6 +110,7 @@ def test_valid_action_parsing_with_special_characters(parser):
assert result.tool_input == "what is the temperature in SF? @$%^&*"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_combination(parser):
text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "[what is the temperature in SF?]"'
result = parser.parse(text)
@@ -108,6 +119,7 @@ def test_valid_action_parsing_with_combination(parser):
assert result.tool_input == "[what is the temperature in SF?]"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_mixed_quotes(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what's the temperature in SF?\""
result = parser.parse(text)
@@ -116,6 +128,7 @@ def test_valid_action_parsing_with_mixed_quotes(parser):
assert result.tool_input == "what's the temperature in SF?"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_newlines(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is\nthe temperature in SF?"
result = parser.parse(text)
@@ -124,6 +137,7 @@ def test_valid_action_parsing_with_newlines(parser):
assert result.tool_input == "what is\nthe temperature in SF?"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_escaped_characters(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is the temperature in SF? \\n"
result = parser.parse(text)
@@ -132,6 +146,7 @@ def test_valid_action_parsing_with_escaped_characters(parser):
assert result.tool_input == "what is the temperature in SF? \\n"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_json_string(parser):
text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: {"query": "temperature in SF"}'
result = parser.parse(text)
@@ -140,6 +155,7 @@ def test_valid_action_parsing_with_json_string(parser):
assert result.tool_input == '{"query": "temperature in SF"}'
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_unbalanced_quotes(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what is the temperature in SF?"
result = parser.parse(text)
@@ -148,60 +164,70 @@ def test_valid_action_parsing_with_unbalanced_quotes(parser):
assert result.tool_input == "what is the temperature in SF?"
@pytest.mark.timeout(60)
def test_clean_action_no_formatting(parser):
action = "Ask question to senior researcher"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_leading_asterisks(parser):
action = "** Ask question to senior researcher"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_trailing_asterisks(parser):
action = "Ask question to senior researcher **"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_leading_and_trailing_asterisks(parser):
action = "** Ask question to senior researcher **"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_multiple_leading_asterisks(parser):
action = "**** Ask question to senior researcher"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_multiple_trailing_asterisks(parser):
action = "Ask question to senior researcher ****"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_spaces_and_asterisks(parser):
action = " ** Ask question to senior researcher ** "
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_only_asterisks(parser):
action = "****"
cleaned_action = parser._clean_action(action)
assert cleaned_action == ""
@pytest.mark.timeout(60)
def test_clean_action_with_empty_string(parser):
action = ""
cleaned_action = parser._clean_action(action)
assert cleaned_action == ""
@pytest.mark.timeout(60)
def test_valid_final_answer_parsing(parser):
text = (
"Thought: I found the information\nFinal Answer: The temperature is 100 degrees"
@@ -211,6 +237,7 @@ def test_valid_final_answer_parsing(parser):
assert result.output == "The temperature is 100 degrees"
@pytest.mark.timeout(60)
def test_missing_action_error(parser):
text = "Thought: Let's find the temperature\nAction Input: what is the temperature in SF?"
with pytest.raises(OutputParserException) as exc_info:
@@ -220,6 +247,7 @@ def test_missing_action_error(parser):
)
@pytest.mark.timeout(60)
def test_missing_action_input_error(parser):
text = "Thought: Let's find the temperature\nAction: search"
with pytest.raises(OutputParserException) as exc_info:
@@ -227,6 +255,7 @@ def test_missing_action_input_error(parser):
assert "I missed the 'Action Input:' after 'Action:'." in str(exc_info.value)
@pytest.mark.timeout(60)
def test_action_and_final_answer_error(parser):
text = "Thought: I found the information\nAction: search\nAction Input: what is the temperature in SF?\nFinal Answer: The temperature is 100 degrees"
with pytest.raises(OutputParserException) as exc_info:
@@ -234,6 +263,7 @@ def test_action_and_final_answer_error(parser):
assert "both perform Action and give a Final Answer" in str(exc_info.value)
@pytest.mark.timeout(60)
def test_safe_repair_json(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": Senior Researcher'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -241,12 +271,14 @@ def test_safe_repair_json(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_unrepairable(parser):
invalid_json = "{invalid_json"
result = parser._safe_repair_json(invalid_json)
assert result == invalid_json # Should return the original if unrepairable
@pytest.mark.timeout(60)
def test_safe_repair_json_missing_quotes(parser):
invalid_json = (
'{task: "Research XAI", context: "Explainable AI", coworker: Senior Researcher}'
@@ -256,6 +288,7 @@ def test_safe_repair_json_missing_quotes(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_unclosed_brackets(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -263,6 +296,7 @@ def test_safe_repair_json_unclosed_brackets(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_extra_commas(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher",}'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -270,6 +304,7 @@ def test_safe_repair_json_extra_commas(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_trailing_commas(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher",}'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -277,6 +312,7 @@ def test_safe_repair_json_trailing_commas(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_single_quotes(parser):
invalid_json = "{'task': 'Research XAI', 'context': 'Explainable AI', 'coworker': 'Senior Researcher'}"
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -284,6 +320,7 @@ def test_safe_repair_json_single_quotes(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_mixed_quotes(parser):
invalid_json = "{'task': \"Research XAI\", 'context': \"Explainable AI\", 'coworker': 'Senior Researcher'}"
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -291,6 +328,7 @@ def test_safe_repair_json_mixed_quotes(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_unescaped_characters(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher\n"}'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -298,6 +336,7 @@ def test_safe_repair_json_unescaped_characters(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_missing_colon(parser):
invalid_json = '{"task" "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -305,6 +344,7 @@ def test_safe_repair_json_missing_colon(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_missing_comma(parser):
invalid_json = '{"task": "Research XAI" "context": "Explainable AI", "coworker": "Senior Researcher"}'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -312,6 +352,7 @@ def test_safe_repair_json_missing_comma(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_unexpected_trailing_characters(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"} random text'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -319,6 +360,7 @@ def test_safe_repair_json_unexpected_trailing_characters(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_special_characters_key(parser):
invalid_json = '{"task!@#": "Research XAI", "context$%^": "Explainable AI", "coworker&*()": "Senior Researcher"}'
expected_repaired_json = '{"task!@#": "Research XAI", "context$%^": "Explainable AI", "coworker&*()": "Senior Researcher"}'
@@ -326,6 +368,7 @@ def test_safe_repair_json_special_characters_key(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_parsing_with_whitespace(parser):
text = " Thought: Let's find the temperature \n Action: search \n Action Input: what is the temperature in SF? "
result = parser.parse(text)
@@ -334,6 +377,7 @@ def test_parsing_with_whitespace(parser):
assert result.tool_input == "what is the temperature in SF?"
@pytest.mark.timeout(60)
def test_parsing_with_special_characters(parser):
text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "what is the temperature in SF?"'
result = parser.parse(text)
@@ -342,6 +386,7 @@ def test_parsing_with_special_characters(parser):
assert result.tool_input == "what is the temperature in SF?"
@pytest.mark.timeout(60)
def test_integration_valid_and_invalid(parser):
text = """
Thought: Let's find the temperature

View File

@@ -1,6 +1,7 @@
import unittest
from unittest.mock import MagicMock, patch
import pytest
import requests
from crewai.cli.authentication.main import AuthenticationCommand
@@ -10,6 +11,7 @@ class TestAuthenticationCommand(unittest.TestCase):
def setUp(self):
self.auth_command = AuthenticationCommand()
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.main.requests.post")
def test_get_device_code(self, mock_post):
mock_response = MagicMock()
@@ -30,6 +32,7 @@ class TestAuthenticationCommand(unittest.TestCase):
)
self.assertEqual(device_code_data["interval"], 5)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.main.console.print")
@patch("crewai.cli.authentication.main.webbrowser.open")
def test_display_auth_instructions(self, mock_open, mock_print):
@@ -44,6 +47,7 @@ class TestAuthenticationCommand(unittest.TestCase):
mock_print.assert_any_call("2. Enter the following code: ", "ABCDEF")
mock_open.assert_called_once_with("https://example.com")
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.main.ToolCommand")
@patch("crewai.cli.authentication.main.requests.post")
@patch("crewai.cli.authentication.main.validate_token")
@@ -69,6 +73,7 @@ class TestAuthenticationCommand(unittest.TestCase):
"\n[bold green]Welcome to CrewAI Enterprise![/bold green]\n"
)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.main.requests.post")
@patch("crewai.cli.authentication.main.console.print")
def test_poll_for_token_error(self, mock_print, mock_post):
@@ -85,6 +90,7 @@ class TestAuthenticationCommand(unittest.TestCase):
mock_print.assert_not_called()
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.main.requests.post")
@patch("crewai.cli.authentication.main.console.print")
def test_poll_for_token_timeout(self, mock_print, mock_post):

View File

@@ -3,12 +3,14 @@ import unittest
from datetime import datetime, timedelta
from unittest.mock import MagicMock, patch
import pytest
from cryptography.fernet import Fernet
from crewai.cli.authentication.utils import TokenManager, validate_token
class TestValidateToken(unittest.TestCase):
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.AsymmetricSignatureVerifier")
@patch("crewai.cli.authentication.utils.TokenVerifier")
def test_validate_token(self, mock_token_verifier, mock_asymmetric_verifier):
@@ -32,6 +34,7 @@ class TestTokenManager(unittest.TestCase):
def setUp(self):
self.token_manager = TokenManager()
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.read_secure_file")
@patch("crewai.cli.authentication.utils.TokenManager.save_secure_file")
@patch("crewai.cli.authentication.utils.TokenManager._get_or_create_key")
@@ -44,6 +47,7 @@ class TestTokenManager(unittest.TestCase):
self.assertEqual(result, mock_key)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.Fernet.generate_key")
@patch("crewai.cli.authentication.utils.TokenManager.read_secure_file")
@patch("crewai.cli.authentication.utils.TokenManager.save_secure_file")
@@ -59,6 +63,7 @@ class TestTokenManager(unittest.TestCase):
mock_generate.assert_called_once()
mock_save.assert_called_once_with("secret.key", mock_key)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.save_secure_file")
def test_save_tokens(self, mock_save):
access_token = "test_token"
@@ -79,6 +84,7 @@ class TestTokenManager(unittest.TestCase):
delta=timedelta(seconds=1),
)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.read_secure_file")
def test_get_token_valid(self, mock_read):
access_token = "test_token"
@@ -91,6 +97,7 @@ class TestTokenManager(unittest.TestCase):
self.assertEqual(result, access_token)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.read_secure_file")
def test_get_token_expired(self, mock_read):
access_token = "test_token"
@@ -103,6 +110,7 @@ class TestTokenManager(unittest.TestCase):
self.assertIsNone(result)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.get_secure_storage_path")
@patch("builtins.open", new_callable=unittest.mock.mock_open)
@patch("crewai.cli.authentication.utils.os.chmod")
@@ -119,6 +127,7 @@ class TestTokenManager(unittest.TestCase):
mock_open().write.assert_called_once_with(content)
mock_chmod.assert_called_once_with(mock_path.__truediv__.return_value, 0o600)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.get_secure_storage_path")
@patch(
"builtins.open", new_callable=unittest.mock.mock_open, read_data=b"test_content"
@@ -135,6 +144,7 @@ class TestTokenManager(unittest.TestCase):
mock_path.__truediv__.assert_called_once_with(filename)
mock_open.assert_called_once_with(mock_path.__truediv__.return_value, "rb")
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.get_secure_storage_path")
def test_read_secure_file_not_exists(self, mock_get_path):
mock_path = MagicMock()

View File

@@ -12,6 +12,7 @@ from crewai.cli.utils import parse_toml
class TestDeployCommand(unittest.TestCase):
@pytest.mark.timeout(60)
@patch("crewai.cli.command.get_auth_token")
@patch("crewai.cli.deploy.main.get_project_name")
@patch("crewai.cli.command.PlusAPI")
@@ -26,10 +27,12 @@ class TestDeployCommand(unittest.TestCase):
self.deploy_command = DeployCommand()
self.mock_client = self.deploy_command.plus_api_client
@pytest.mark.timeout(60)
def test_init_success(self):
self.assertEqual(self.deploy_command.project_name, "test_project")
self.mock_plus_api.assert_called_once_with(api_key="test_token")
@pytest.mark.timeout(60)
@patch("crewai.cli.command.get_auth_token")
def test_init_failure(self, mock_get_auth_token):
mock_get_auth_token.side_effect = Exception("Auth failed")
@@ -37,6 +40,7 @@ class TestDeployCommand(unittest.TestCase):
with self.assertRaises(SystemExit):
DeployCommand()
@pytest.mark.timeout(60)
def test_validate_response_successful_response(self):
mock_response = Mock(spec=requests.Response)
mock_response.json.return_value = {"message": "Success"}
@@ -47,6 +51,7 @@ class TestDeployCommand(unittest.TestCase):
self.deploy_command._validate_response(mock_response)
assert fake_out.getvalue() == ""
@pytest.mark.timeout(60)
def test_validate_response_json_decode_error(self):
mock_response = Mock(spec=requests.Response)
mock_response.json.side_effect = JSONDecodeError("Decode error", "", 0)
@@ -64,6 +69,7 @@ class TestDeployCommand(unittest.TestCase):
assert "Status Code: 500" in output
assert "Response:\nb'Invalid JSON'" in output
@pytest.mark.timeout(60)
def test_validate_response_422_error(self):
mock_response = Mock(spec=requests.Response)
mock_response.json.return_value = {
@@ -84,6 +90,7 @@ class TestDeployCommand(unittest.TestCase):
assert "Field1 Error message 1" in output
assert "Field2 Error message 2" in output
@pytest.mark.timeout(60)
def test_validate_response_other_error(self):
mock_response = Mock(spec=requests.Response)
mock_response.json.return_value = {"error": "Something went wrong"}
@@ -97,11 +104,13 @@ class TestDeployCommand(unittest.TestCase):
assert "Request to Enterprise API failed. Details:" in output
assert "Details:\nSomething went wrong" in output
@pytest.mark.timeout(60)
def test_standard_no_param_error_message(self):
with patch("sys.stdout", new=StringIO()) as fake_out:
self.deploy_command._standard_no_param_error_message()
self.assertIn("No UUID provided", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_display_deployment_info(self):
with patch("sys.stdout", new=StringIO()) as fake_out:
self.deploy_command._display_deployment_info(
@@ -111,6 +120,7 @@ class TestDeployCommand(unittest.TestCase):
self.assertIn("test-uuid", fake_out.getvalue())
self.assertIn("deployed", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_display_logs(self):
with patch("sys.stdout", new=StringIO()) as fake_out:
self.deploy_command._display_logs(
@@ -118,6 +128,7 @@ class TestDeployCommand(unittest.TestCase):
)
self.assertIn("2023-01-01 - INFO: Test log", fake_out.getvalue())
@pytest.mark.timeout(60)
@patch("crewai.cli.deploy.main.DeployCommand._display_deployment_info")
def test_deploy_with_uuid(self, mock_display):
mock_response = MagicMock()
@@ -130,6 +141,7 @@ class TestDeployCommand(unittest.TestCase):
self.mock_client.deploy_by_uuid.assert_called_once_with("test-uuid")
mock_display.assert_called_once_with({"uuid": "test-uuid"})
@pytest.mark.timeout(60)
@patch("crewai.cli.deploy.main.DeployCommand._display_deployment_info")
def test_deploy_with_project_name(self, mock_display):
mock_response = MagicMock()
@@ -142,6 +154,7 @@ class TestDeployCommand(unittest.TestCase):
self.mock_client.deploy_by_name.assert_called_once_with("test_project")
mock_display.assert_called_once_with({"uuid": "test-uuid"})
@pytest.mark.timeout(60)
@patch("crewai.cli.deploy.main.fetch_and_json_env_file")
@patch("crewai.cli.deploy.main.git.Repository.origin_url")
@patch("builtins.input")
@@ -160,6 +173,7 @@ class TestDeployCommand(unittest.TestCase):
self.assertIn("Deployment created successfully!", fake_out.getvalue())
self.assertIn("new-uuid", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_list_crews(self):
mock_response = MagicMock()
mock_response.status_code = 200
@@ -174,6 +188,7 @@ class TestDeployCommand(unittest.TestCase):
self.assertIn("Crew1 (uuid1) active", fake_out.getvalue())
self.assertIn("Crew2 (uuid2) inactive", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_get_crew_status(self):
mock_response = MagicMock()
mock_response.status_code = 200
@@ -185,6 +200,7 @@ class TestDeployCommand(unittest.TestCase):
self.assertIn("TestCrew", fake_out.getvalue())
self.assertIn("active", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_get_crew_logs(self):
mock_response = MagicMock()
mock_response.status_code = 200
@@ -199,6 +215,7 @@ class TestDeployCommand(unittest.TestCase):
self.assertIn("2023-01-01 - INFO: Log1", fake_out.getvalue())
self.assertIn("2023-01-02 - ERROR: Log2", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_remove_crew(self):
mock_response = MagicMock()
mock_response.status_code = 204
@@ -210,6 +227,7 @@ class TestDeployCommand(unittest.TestCase):
"Crew 'test_project' removed successfully", fake_out.getvalue()
)
@pytest.mark.timeout(60)
@unittest.skipIf(sys.version_info < (3, 11), "Requires Python 3.11+")
def test_parse_toml_python_311_plus(self):
toml_content = """
@@ -224,6 +242,7 @@ class TestDeployCommand(unittest.TestCase):
parsed = parse_toml(toml_content)
self.assertEqual(parsed["tool"]["poetry"]["name"], "test_project")
@pytest.mark.timeout(60)
@patch(
"builtins.open",
new_callable=unittest.mock.mock_open,
@@ -242,6 +261,7 @@ class TestDeployCommand(unittest.TestCase):
print("project_name", project_name)
self.assertEqual(project_name, "test_project")
@pytest.mark.timeout(60)
@unittest.skipIf(sys.version_info < (3, 11), "Requires Python 3.11+")
@patch(
"builtins.open",
@@ -260,6 +280,7 @@ class TestDeployCommand(unittest.TestCase):
project_name = get_project_name()
self.assertEqual(project_name, "test_project")
@pytest.mark.timeout(60)
def test_get_crewai_version(self):
from crewai.cli.version import get_crewai_version

View File

@@ -7,6 +7,7 @@ from io import StringIO
from unittest import mock
from unittest.mock import MagicMock, patch
import pytest
from pytest import raises
from crewai.cli.tools.main import ToolCommand
@@ -23,14 +24,16 @@ def in_temp_dir():
os.chdir(original_dir)
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.subprocess.run")
def test_create_success(mock_subprocess):
with in_temp_dir():
tool_command = ToolCommand()
with patch.object(tool_command, "login") as mock_login, patch(
"sys.stdout", new=StringIO()
) as fake_out:
with (
patch.object(tool_command, "login") as mock_login,
patch("sys.stdout", new=StringIO()) as fake_out,
):
tool_command.create("test-tool")
output = fake_out.getvalue()
@@ -52,6 +55,7 @@ def test_create_success(mock_subprocess):
assert "Creating custom tool test_tool..." in output
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.subprocess.run")
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
def test_install_success(mock_get, mock_subprocess_run):
@@ -82,12 +86,13 @@ def test_install_success(mock_get, mock_subprocess_run):
capture_output=False,
text=True,
check=True,
env=unittest.mock.ANY
env=unittest.mock.ANY,
)
assert "Successfully installed sample-tool" in output
@pytest.mark.timeout(60)
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
def test_install_tool_not_found(mock_get):
mock_get_response = MagicMock()
@@ -107,6 +112,7 @@ def test_install_tool_not_found(mock_get):
assert "No tool found with this name" in output
@pytest.mark.timeout(60)
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
def test_install_api_error(mock_get):
mock_get_response = MagicMock()
@@ -126,6 +132,7 @@ def test_install_api_error(mock_get):
assert "Failed to get tool details" in output
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.git.Repository.is_synced", return_value=False)
def test_publish_when_not_in_sync(mock_is_synced):
with patch("sys.stdout", new=StringIO()) as fake_out, raises(SystemExit):
@@ -135,6 +142,7 @@ def test_publish_when_not_in_sync(mock_is_synced):
assert "Local changes need to be resolved before publishing" in fake_out.getvalue()
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
@patch("crewai.cli.tools.main.get_project_version", return_value="1.0.0")
@patch("crewai.cli.tools.main.get_project_description", return_value="A sample tool")
@@ -183,6 +191,7 @@ def test_publish_when_not_in_sync_and_force(
)
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
@patch("crewai.cli.tools.main.get_project_version", return_value="1.0.0")
@patch("crewai.cli.tools.main.get_project_description", return_value="A sample tool")
@@ -231,6 +240,7 @@ def test_publish_success(
)
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
@patch("crewai.cli.tools.main.get_project_version", return_value="1.0.0")
@patch("crewai.cli.tools.main.get_project_description", return_value="A sample tool")
@@ -270,6 +280,7 @@ def test_publish_failure(
assert "Name is already taken" in output
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
@patch("crewai.cli.tools.main.get_project_version", return_value="1.0.0")
@patch("crewai.cli.tools.main.get_project_description", return_value="A sample tool")

View File

@@ -333,16 +333,16 @@ def test_manager_agent_delegating_to_assigned_task_agent():
)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
# which sets the output attribute of the task
task.output = mock_task_output
with patch.object(Task, 'execute_sync', return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Verify execute_sync was called once
@@ -350,12 +350,20 @@ def test_manager_agent_delegating_to_assigned_task_agent():
# Get the tools argument from the call
_, kwargs = mock_execute_sync.call_args
tools = kwargs['tools']
tools = kwargs["tools"]
# Verify the delegation tools were passed correctly
assert len(tools) == 2
assert any("Delegate a specific task to one of the following coworkers: Researcher" in tool.description for tool in tools)
assert any("Ask a specific question to one of the following coworkers: Researcher" in tool.description for tool in tools)
assert any(
"Delegate a specific task to one of the following coworkers: Researcher"
in tool.description
for tool in tools
)
assert any(
"Ask a specific question to one of the following coworkers: Researcher"
in tool.description
for tool in tools
)
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -404,7 +412,7 @@ def test_manager_agent_delegates_with_varied_role_cases():
backstory="A researcher with spaces in role name",
allow_delegation=False,
)
writer_caps = Agent(
role="SENIOR WRITER", # All caps
goal="Write with caps in role",
@@ -426,13 +434,13 @@ def test_manager_agent_delegates_with_varied_role_cases():
)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
task.output = mock_task_output
with patch.object(Task, 'execute_sync', return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Verify execute_sync was called once
@@ -440,20 +448,32 @@ def test_manager_agent_delegates_with_varied_role_cases():
# Get the tools argument from the call
_, kwargs = mock_execute_sync.call_args
tools = kwargs['tools']
tools = kwargs["tools"]
# Verify the delegation tools were passed correctly and can handle case/whitespace variations
assert len(tools) == 2
# Check delegation tool descriptions (should work despite case/whitespace differences)
delegation_tool = tools[0]
question_tool = tools[1]
assert "Delegate a specific task to one of the following coworkers:" in delegation_tool.description
assert " Researcher " in delegation_tool.description or "SENIOR WRITER" in delegation_tool.description
assert "Ask a specific question to one of the following coworkers:" in question_tool.description
assert " Researcher " in question_tool.description or "SENIOR WRITER" in question_tool.description
assert (
"Delegate a specific task to one of the following coworkers:"
in delegation_tool.description
)
assert (
" Researcher " in delegation_tool.description
or "SENIOR WRITER" in delegation_tool.description
)
assert (
"Ask a specific question to one of the following coworkers:"
in question_tool.description
)
assert (
" Researcher " in question_tool.description
or "SENIOR WRITER" in question_tool.description
)
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -479,6 +499,7 @@ def test_crew_with_delegating_agents():
== "In the rapidly evolving landscape of technology, AI agents have emerged as formidable tools, revolutionizing how we interact with data and automate tasks. These sophisticated systems leverage machine learning and natural language processing to perform a myriad of functions, from virtual personal assistants to complex decision-making companions in industries such as finance, healthcare, and education. By mimicking human intelligence, AI agents can analyze massive data sets at unparalleled speeds, enabling businesses to uncover valuable insights, enhance productivity, and elevate user experiences to unprecedented levels.\n\nOne of the most striking aspects of AI agents is their adaptability; they learn from their interactions and continuously improve their performance over time. This feature is particularly valuable in customer service where AI agents can address inquiries, resolve issues, and provide personalized recommendations without the limitations of human fatigue. Moreover, with intuitive interfaces, AI agents enhance user interactions, making technology more accessible and user-friendly, thereby breaking down barriers that have historically hindered digital engagement.\n\nDespite their immense potential, the deployment of AI agents raises important ethical and practical considerations. Issues related to privacy, data security, and the potential for job displacement necessitate thoughtful dialogue and proactive measures. Striking a balance between technological innovation and societal impact will be crucial as organizations integrate these agents into their operations. Additionally, ensuring transparency in AI decision-making processes is vital to maintain public trust as AI agents become an integral part of daily life.\n\nLooking ahead, the future of AI agents appears bright, with ongoing advancements promising even greater capabilities. As we continue to harness the power of AI, we can expect these agents to play a transformative role in shaping various sectors—streamlining workflows, enabling smarter decision-making, and fostering more personalized experiences. Embracing this technology responsibly can lead to a future where AI agents not only augment human effort but also inspire creativity and efficiency across the board, ultimately redefining our interaction with the digital world."
)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents_should_not_override_task_tools():
from typing import Type
@@ -489,6 +510,7 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():
class TestToolInput(BaseModel): class TestToolInput(BaseModel):
"""Input schema for TestTool.""" """Input schema for TestTool."""
query: str = Field(..., description="Query to process") query: str = Field(..., description="Query to process")
class TestTool(BaseTool): class TestTool(BaseTool):
@@ -516,24 +538,29 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():
) )
mock_task_output = TaskOutput( mock_task_output = TaskOutput(
description="Mock description", description="Mock description", raw="mocked output", agent="mocked agent"
raw="mocked output",
agent="mocked agent"
) )
# Because we are mocking execute_sync, we never hit the underlying _execute_core # Because we are mocking execute_sync, we never hit the underlying _execute_core
# which sets the output attribute of the task # which sets the output attribute of the task
tasks[0].output = mock_task_output tasks[0].output = mock_task_output
with patch.object(Task, 'execute_sync', return_value=mock_task_output) as mock_execute_sync: with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff() crew.kickoff()
# Execute the task and verify both tools are present # Execute the task and verify both tools are present
_, kwargs = mock_execute_sync.call_args _, kwargs = mock_execute_sync.call_args
tools = kwargs['tools'] tools = kwargs["tools"]
assert any(
isinstance(tool, TestTool) for tool in tools
), "TestTool should be present"
assert any(
"delegate" in tool.name.lower() for tool in tools
), "Delegation tool should be present"
assert any(isinstance(tool, TestTool) for tool in tools), "TestTool should be present"
assert any("delegate" in tool.name.lower() for tool in tools), "Delegation tool should be present"
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents_should_not_override_agent_tools(): def test_crew_with_delegating_agents_should_not_override_agent_tools():
@@ -545,6 +572,7 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
class TestToolInput(BaseModel): class TestToolInput(BaseModel):
"""Input schema for TestTool.""" """Input schema for TestTool."""
query: str = Field(..., description="Query to process") query: str = Field(..., description="Query to process")
class TestTool(BaseTool): class TestTool(BaseTool):
@@ -563,7 +591,7 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
Task( Task(
description="Produce and amazing 1 paragraph draft of an article about AI Agents.", description="Produce and amazing 1 paragraph draft of an article about AI Agents.",
expected_output="A 4 paragraph article about AI.", expected_output="A 4 paragraph article about AI.",
agent=new_ceo agent=new_ceo,
) )
] ]
@@ -574,24 +602,29 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
) )
mock_task_output = TaskOutput( mock_task_output = TaskOutput(
description="Mock description", description="Mock description", raw="mocked output", agent="mocked agent"
raw="mocked output",
agent="mocked agent"
) )
# Because we are mocking execute_sync, we never hit the underlying _execute_core # Because we are mocking execute_sync, we never hit the underlying _execute_core
# which sets the output attribute of the task # which sets the output attribute of the task
tasks[0].output = mock_task_output tasks[0].output = mock_task_output
with patch.object(Task, 'execute_sync', return_value=mock_task_output) as mock_execute_sync: with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff() crew.kickoff()
# Execute the task and verify both tools are present # Execute the task and verify both tools are present
_, kwargs = mock_execute_sync.call_args _, kwargs = mock_execute_sync.call_args
tools = kwargs['tools'] tools = kwargs["tools"]
assert any(
isinstance(tool, TestTool) for tool in new_ceo.tools
), "TestTool should be present"
assert any(
"delegate" in tool.name.lower() for tool in tools
), "Delegation tool should be present"
assert any(isinstance(tool, TestTool) for tool in new_ceo.tools), "TestTool should be present"
assert any("delegate" in tool.name.lower() for tool in tools), "Delegation tool should be present"
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_task_tools_override_agent_tools(): def test_task_tools_override_agent_tools():
@@ -603,6 +636,7 @@ def test_task_tools_override_agent_tools():
class TestToolInput(BaseModel): class TestToolInput(BaseModel):
"""Input schema for TestTool.""" """Input schema for TestTool."""
query: str = Field(..., description="Query to process") query: str = Field(..., description="Query to process")
class TestTool(BaseTool): class TestTool(BaseTool):
@@ -630,14 +664,10 @@ def test_task_tools_override_agent_tools():
description="Write a test task", description="Write a test task",
expected_output="Test output", expected_output="Test output",
agent=new_researcher, agent=new_researcher,
tools=[AnotherTestTool()] tools=[AnotherTestTool()],
) )
crew = Crew( crew = Crew(agents=[new_researcher], tasks=[task], process=Process.sequential)
agents=[new_researcher],
tasks=[task],
process=Process.sequential
)
crew.kickoff() crew.kickoff()
@@ -650,6 +680,7 @@ def test_task_tools_override_agent_tools():
assert len(new_researcher.tools) == 1 assert len(new_researcher.tools) == 1
assert isinstance(new_researcher.tools[0], TestTool) assert isinstance(new_researcher.tools[0], TestTool)
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_task_tools_override_agent_tools_with_allow_delegation(): def test_task_tools_override_agent_tools_with_allow_delegation():
""" """
@@ -702,13 +733,13 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
) )
mock_task_output = TaskOutput( mock_task_output = TaskOutput(
description="Mock description", description="Mock description", raw="mocked output", agent="mocked agent"
raw="mocked output",
agent="mocked agent"
) )
# We mock execute_sync to verify which tools get used at runtime # We mock execute_sync to verify which tools get used at runtime
with patch.object(Task, "execute_sync", return_value=mock_task_output) as mock_execute_sync: with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff() crew.kickoff()
# Inspect the call kwargs to verify the actual tools passed to execution # Inspect the call kwargs to verify the actual tools passed to execution
@@ -716,16 +747,23 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
used_tools = kwargs["tools"] used_tools = kwargs["tools"]
# Confirm AnotherTestTool is present but TestTool is not # Confirm AnotherTestTool is present but TestTool is not
assert any(isinstance(tool, AnotherTestTool) for tool in used_tools), "AnotherTestTool should be present" assert any(
assert not any(isinstance(tool, TestTool) for tool in used_tools), "TestTool should not be present among used tools" isinstance(tool, AnotherTestTool) for tool in used_tools
), "AnotherTestTool should be present"
assert not any(
isinstance(tool, TestTool) for tool in used_tools
), "TestTool should not be present among used tools"
# Confirm delegation tool(s) are present # Confirm delegation tool(s) are present
assert any("delegate" in tool.name.lower() for tool in used_tools), "Delegation tool should be present" assert any(
"delegate" in tool.name.lower() for tool in used_tools
), "Delegation tool should be present"
# Finally, make sure the agent's original tools remain unchanged # Finally, make sure the agent's original tools remain unchanged
assert len(researcher_with_delegation.tools) == 1 assert len(researcher_with_delegation.tools) == 1
assert isinstance(researcher_with_delegation.tools[0], TestTool) assert isinstance(researcher_with_delegation.tools[0], TestTool)
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_verbose_output(capsys): def test_crew_verbose_output(capsys):
tasks = [ tasks = [
@@ -1518,12 +1556,12 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
crew = Crew(agents=[programmer], tasks=[task]) crew = Crew(agents=[programmer], tasks=[task])
mock_task_output = TaskOutput( mock_task_output = TaskOutput(
description="Mock description", description="Mock description", raw="mocked output", agent="mocked agent"
raw="mocked output",
agent="mocked agent"
) )
with patch.object(Task, "execute_sync", return_value=mock_task_output) as mock_execute_sync: with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff() crew.kickoff()
# Get the tools that were actually used in execution # Get the tools that were actually used in execution
@@ -1532,7 +1570,10 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
# Verify that exactly one tool was used and it was a CodeInterpreterTool # Verify that exactly one tool was used and it was a CodeInterpreterTool
assert len(used_tools) == 1, "Should have exactly one tool" assert len(used_tools) == 1, "Should have exactly one tool"
assert isinstance(used_tools[0], CodeInterpreterTool), "Tool should be CodeInterpreterTool" assert isinstance(
used_tools[0], CodeInterpreterTool
), "Tool should be CodeInterpreterTool"
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_delegation_is_not_enabled_if_there_are_only_one_agent(): def test_delegation_is_not_enabled_if_there_are_only_one_agent():
@@ -1643,16 +1684,16 @@ def test_hierarchical_crew_creation_tasks_with_agents():
) )
mock_task_output = TaskOutput( mock_task_output = TaskOutput(
description="Mock description", description="Mock description", raw="mocked output", agent="mocked agent"
raw="mocked output",
agent="mocked agent"
) )
# Because we are mocking execute_sync, we never hit the underlying _execute_core # Because we are mocking execute_sync, we never hit the underlying _execute_core
# which sets the output attribute of the task # which sets the output attribute of the task
task.output = mock_task_output task.output = mock_task_output
with patch.object(Task, 'execute_sync', return_value=mock_task_output) as mock_execute_sync: with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff() crew.kickoff()
# Verify execute_sync was called once # Verify execute_sync was called once
@@ -1660,12 +1701,20 @@ def test_hierarchical_crew_creation_tasks_with_agents():
# Get the tools argument from the call # Get the tools argument from the call
_, kwargs = mock_execute_sync.call_args _, kwargs = mock_execute_sync.call_args
tools = kwargs['tools'] tools = kwargs["tools"]
# Verify the delegation tools were passed correctly # Verify the delegation tools were passed correctly
assert len(tools) == 2 assert len(tools) == 2
assert any("Delegate a specific task to one of the following coworkers: Senior Writer" in tool.description for tool in tools) assert any(
assert any("Ask a specific question to one of the following coworkers: Senior Writer" in tool.description for tool in tools) "Delegate a specific task to one of the following coworkers: Senior Writer"
in tool.description
for tool in tools
)
assert any(
"Ask a specific question to one of the following coworkers: Senior Writer"
in tool.description
for tool in tools
)
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@@ -1688,9 +1737,7 @@ def test_hierarchical_crew_creation_tasks_with_async_execution():
) )
mock_task_output = TaskOutput( mock_task_output = TaskOutput(
description="Mock description", description="Mock description", raw="mocked output", agent="mocked agent"
raw="mocked output",
agent="mocked agent"
) )
# Create a mock Future that returns our TaskOutput # Create a mock Future that returns our TaskOutput
@@ -1701,7 +1748,9 @@ def test_hierarchical_crew_creation_tasks_with_async_execution():
# which sets the output attribute of the task # which sets the output attribute of the task
task.output = mock_task_output task.output = mock_task_output
with patch.object(Task, 'execute_async', return_value=mock_future) as mock_execute_async: with patch.object(
Task, "execute_async", return_value=mock_future
) as mock_execute_async:
crew.kickoff() crew.kickoff()
# Verify execute_async was called once # Verify execute_async was called once
@@ -1709,12 +1758,20 @@ def test_hierarchical_crew_creation_tasks_with_async_execution():
# Get the tools argument from the call # Get the tools argument from the call
_, kwargs = mock_execute_async.call_args _, kwargs = mock_execute_async.call_args
tools = kwargs['tools'] tools = kwargs["tools"]
# Verify the delegation tools were passed correctly # Verify the delegation tools were passed correctly
assert len(tools) == 2 assert len(tools) == 2
assert any("Delegate a specific task to one of the following coworkers: Senior Writer\n" in tool.description for tool in tools) assert any(
assert any("Ask a specific question to one of the following coworkers: Senior Writer\n" in tool.description for tool in tools) "Delegate a specific task to one of the following coworkers: Senior Writer\n"
in tool.description
for tool in tools
)
assert any(
"Ask a specific question to one of the following coworkers: Senior Writer\n"
in tool.description
for tool in tools
)
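The mock Future referenced above ("Create a mock Future that returns our TaskOutput") is built outside this hunk; one way to construct an equivalent stub, assuming a standard concurrent.futures.Future (the fixture's actual type is not visible in this diff):

from concurrent.futures import Future
from unittest.mock import patch

from crewai.task import Task
from crewai.tasks.task_output import TaskOutput

stub_output = TaskOutput(description="stub", raw="stub", agent="stub")
mock_future = Future()
mock_future.set_result(stub_output)  # .result() now returns the stub immediately

# Task.execute_async then hands back the pre-resolved future instead of spawning real work:
with patch.object(Task, "execute_async", return_value=mock_future) as mock_execute_async:
    pass  # crew.kickoff() would run here; mock_execute_async.call_args then exposes the tools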
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@@ -2059,7 +2116,7 @@ def test_crew_output_file_validation_failures():
description="Analyze data", description="Analyze data",
expected_output="Analysis results", expected_output="Analysis results",
agent=agent, agent=agent,
output_file="../output.txt" output_file="../output.txt",
) )
Crew(agents=[agent], tasks=[task]).kickoff() Crew(agents=[agent], tasks=[task]).kickoff()
@@ -2069,7 +2126,7 @@ def test_crew_output_file_validation_failures():
description="Analyze data", description="Analyze data",
expected_output="Analysis results", expected_output="Analysis results",
agent=agent, agent=agent,
output_file="output.txt | rm -rf /" output_file="output.txt | rm -rf /",
) )
Crew(agents=[agent], tasks=[task]).kickoff() Crew(agents=[agent], tasks=[task]).kickoff()
@@ -2079,7 +2136,7 @@ def test_crew_output_file_validation_failures():
description="Analyze data", description="Analyze data",
expected_output="Analysis results", expected_output="Analysis results",
agent=agent, agent=agent,
output_file="~/output.txt" output_file="~/output.txt",
) )
Crew(agents=[agent], tasks=[task]).kickoff() Crew(agents=[agent], tasks=[task]).kickoff()
@@ -2089,7 +2146,7 @@ def test_crew_output_file_validation_failures():
description="Analyze data", description="Analyze data",
expected_output="Analysis results", expected_output="Analysis results",
agent=agent, agent=agent,
output_file="{invalid-name}/output.txt" output_file="{invalid-name}/output.txt",
) )
Crew(agents=[agent], tasks=[task]).kickoff() Crew(agents=[agent], tasks=[task]).kickoff()
@@ -3053,6 +3110,7 @@ def test_task_tools_preserve_code_execution_tools():
class TestToolInput(BaseModel): class TestToolInput(BaseModel):
"""Input schema for TestTool.""" """Input schema for TestTool."""
query: str = Field(..., description="Query to process") query: str = Field(..., description="Query to process")
class TestTool(BaseTool): class TestTool(BaseTool):
@@ -3086,7 +3144,7 @@ def test_task_tools_preserve_code_execution_tools():
description="Write a program to calculate fibonacci numbers.", description="Write a program to calculate fibonacci numbers.",
expected_output="A working fibonacci calculator.", expected_output="A working fibonacci calculator.",
agent=programmer, agent=programmer,
tools=[TestTool()] tools=[TestTool()],
) )
crew = Crew( crew = Crew(
@@ -3096,12 +3154,12 @@ def test_task_tools_preserve_code_execution_tools():
) )
mock_task_output = TaskOutput( mock_task_output = TaskOutput(
description="Mock description", description="Mock description", raw="mocked output", agent="mocked agent"
raw="mocked output",
agent="mocked agent"
) )
with patch.object(Task, "execute_sync", return_value=mock_task_output) as mock_execute_sync: with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff() crew.kickoff()
# Get the tools that were actually used in execution # Get the tools that were actually used in execution
@@ -3109,12 +3167,21 @@ def test_task_tools_preserve_code_execution_tools():
used_tools = kwargs["tools"] used_tools = kwargs["tools"]
# Verify all expected tools are present # Verify all expected tools are present
assert any(isinstance(tool, TestTool) for tool in used_tools), "Task's TestTool should be present" assert any(
assert any(isinstance(tool, CodeInterpreterTool) for tool in used_tools), "CodeInterpreterTool should be present" isinstance(tool, TestTool) for tool in used_tools
assert any("delegate" in tool.name.lower() for tool in used_tools), "Delegation tool should be present" ), "Task's TestTool should be present"
assert any(
isinstance(tool, CodeInterpreterTool) for tool in used_tools
), "CodeInterpreterTool should be present"
assert any(
"delegate" in tool.name.lower() for tool in used_tools
), "Delegation tool should be present"
# Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools) # Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
assert len(used_tools) == 4, "Should have TestTool, CodeInterpreter, and 2 delegation tools" assert (
len(used_tools) == 4
), "Should have TestTool, CodeInterpreter, and 2 delegation tools"
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_multimodal_flag_adds_multimodal_tools(): def test_multimodal_flag_adds_multimodal_tools():
@@ -3143,13 +3210,13 @@ def test_multimodal_flag_adds_multimodal_tools():
crew = Crew(agents=[multimodal_agent], tasks=[task], process=Process.sequential) crew = Crew(agents=[multimodal_agent], tasks=[task], process=Process.sequential)
mock_task_output = TaskOutput( mock_task_output = TaskOutput(
description="Mock description", description="Mock description", raw="mocked output", agent="mocked agent"
raw="mocked output",
agent="mocked agent"
) )
# Mock execute_sync to verify the tools passed at runtime # Mock execute_sync to verify the tools passed at runtime
with patch.object(Task, "execute_sync", return_value=mock_task_output) as mock_execute_sync: with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff() crew.kickoff()
# Get the tools that were actually used in execution # Get the tools that were actually used in execution
@@ -3157,13 +3224,14 @@ def test_multimodal_flag_adds_multimodal_tools():
used_tools = kwargs["tools"] used_tools = kwargs["tools"]
# Check that the multimodal tool was added # Check that the multimodal tool was added
assert any(isinstance(tool, AddImageTool) for tool in used_tools), ( assert any(
"AddImageTool should be present when agent is multimodal" isinstance(tool, AddImageTool) for tool in used_tools
) ), "AddImageTool should be present when agent is multimodal"
# Verify we have exactly one tool (just the AddImageTool) # Verify we have exactly one tool (just the AddImageTool)
assert len(used_tools) == 1, "Should only have the AddImageTool" assert len(used_tools) == 1, "Should only have the AddImageTool"
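For readers skimming this hunk: multimodal=True is what distinguishes this agent, and per the assertions above it results in exactly one injected tool, AddImageTool. A minimal construction sketch, assuming the top-level crewai Agent import used by these tests and placeholder role/goal/backstory text:

from crewai import Agent  # import path assumed; not shown in this diff

multimodal_agent = Agent(
    role="Image Analyst",                      # placeholder values
    goal="Describe the contents of images",
    backstory="Placeholder backstory",
    multimodal=True,        # per this test, kickoff() then injects AddImageTool
    allow_delegation=False,
)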
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_multimodal_agent_image_tool_handling(): def test_multimodal_agent_image_tool_handling():
""" """
@@ -3205,10 +3273,10 @@ def test_multimodal_agent_image_tool_handling():
mock_task_output = TaskOutput( mock_task_output = TaskOutput(
description="Mock description", description="Mock description",
raw="A detailed analysis of the image", raw="A detailed analysis of the image",
agent="Image Analyst" agent="Image Analyst",
) )
with patch.object(Task, 'execute_sync') as mock_execute_sync: with patch.object(Task, "execute_sync") as mock_execute_sync:
# Set up the mock to return our task output # Set up the mock to return our task output
mock_execute_sync.return_value = mock_task_output mock_execute_sync.return_value = mock_task_output
@@ -3217,7 +3285,7 @@ def test_multimodal_agent_image_tool_handling():
# Get the tools that were passed to execute_sync # Get the tools that were passed to execute_sync
_, kwargs = mock_execute_sync.call_args _, kwargs = mock_execute_sync.call_args
tools = kwargs['tools'] tools = kwargs["tools"]
# Verify the AddImageTool is present and properly configured # Verify the AddImageTool is present and properly configured
image_tools = [tool for tool in tools if tool.name == "Add image to content"] image_tools = [tool for tool in tools if tool.name == "Add image to content"]
@@ -3227,7 +3295,7 @@ def test_multimodal_agent_image_tool_handling():
image_tool = image_tools[0] image_tool = image_tools[0]
result = image_tool._run( result = image_tool._run(
image_url="https://example.com/test-image.jpg", image_url="https://example.com/test-image.jpg",
action="Please analyze this image" action="Please analyze this image",
) )
# Verify the tool returns the expected format # Verify the tool returns the expected format
@@ -3237,6 +3305,7 @@ def test_multimodal_agent_image_tool_handling():
assert result["content"][0]["type"] == "text" assert result["content"][0]["type"] == "text"
assert result["content"][1]["type"] == "image_url" assert result["content"][1]["type"] == "image_url"
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_multimodal_agent_live_image_analysis(): def test_multimodal_agent_live_image_analysis():
""" """
@@ -3250,7 +3319,7 @@ def test_multimodal_agent_live_image_analysis():
allow_delegation=False, allow_delegation=False,
multimodal=True, multimodal=True,
verbose=True, verbose=True,
llm="gpt-4o" llm="gpt-4o",
) )
# Create a task for image analysis # Create a task for image analysis
@@ -3261,19 +3330,18 @@ def test_multimodal_agent_live_image_analysis():
Image: {image_url} Image: {image_url}
""", """,
expected_output="A comprehensive description of the image contents.", expected_output="A comprehensive description of the image contents.",
agent=image_analyst agent=image_analyst,
) )
# Create and run the crew # Create and run the crew
crew = Crew( crew = Crew(agents=[image_analyst], tasks=[analyze_image])
agents=[image_analyst],
tasks=[analyze_image]
)
# Execute with an image URL # Execute with an image URL
result = crew.kickoff(inputs={ result = crew.kickoff(
"image_url": "https://media.istockphoto.com/id/946087016/photo/aerial-view-of-lower-manhattan-new-york.jpg?s=612x612&w=0&k=20&c=viLiMRznQ8v5LzKTt_LvtfPFUVl1oiyiemVdSlm29_k=" inputs={
}) "image_url": "https://media.istockphoto.com/id/946087016/photo/aerial-view-of-lower-manhattan-new-york.jpg?s=612x612&w=0&k=20&c=viLiMRznQ8v5LzKTt_LvtfPFUVl1oiyiemVdSlm29_k="
}
)
# Verify we got a meaningful response # Verify we got a meaningful response
assert isinstance(result.raw, str) assert isinstance(result.raw, str)
View File
@@ -216,6 +216,7 @@ def test_multiple_output_type_error():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_pydantic_sequential(): def test_output_pydantic_sequential():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
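A note on the marker this commit adds throughout: @pytest.mark.timeout(60) comes from the pytest-timeout plugin and fails a test that runs past the limit, so a hung VCR or LLM call cannot stall the whole suite. A self-contained sketch, assuming pytest-timeout is installed:

import time

import pytest


@pytest.mark.timeout(60)  # fail this test if it runs longer than 60 seconds
def test_finishes_well_under_the_limit():
    time.sleep(1)  # well under the limit, so the test passes normally
    assert True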
@@ -241,6 +242,7 @@ def test_output_pydantic_sequential():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_pydantic_hierarchical(): def test_output_pydantic_hierarchical():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -271,6 +273,7 @@ def test_output_pydantic_hierarchical():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_sequential(): def test_output_json_sequential():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -297,6 +300,7 @@ def test_output_json_sequential():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_hierarchical(): def test_output_json_hierarchical():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -327,6 +331,7 @@ def test_output_json_hierarchical():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_json_property_without_output_json(): def test_json_property_without_output_json():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -355,6 +360,7 @@ def test_json_property_without_output_json():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_dict_sequential(): def test_output_json_dict_sequential():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -380,6 +386,7 @@ def test_output_json_dict_sequential():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_dict_hierarchical(): def test_output_json_dict_hierarchical():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -410,6 +417,7 @@ def test_output_json_dict_hierarchical():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_pydantic_to_another_task(): def test_output_pydantic_to_another_task():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -448,6 +456,7 @@ def test_output_pydantic_to_another_task():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_to_another_task(): def test_output_json_to_another_task():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -479,6 +488,7 @@ def test_output_json_to_another_task():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_save_task_output(): def test_save_task_output():
scorer = Agent( scorer = Agent(
role="Scorer", role="Scorer",
@@ -503,6 +513,7 @@ def test_save_task_output():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_save_task_json_output(): def test_save_task_json_output():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -533,6 +544,7 @@ def test_save_task_json_output():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_save_task_pydantic_output(): def test_save_task_pydantic_output():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -563,6 +575,7 @@ def test_save_task_pydantic_output():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_custom_converter_cls(): def test_custom_converter_cls():
class ScoreOutput(BaseModel): class ScoreOutput(BaseModel):
score: int score: int
@@ -595,6 +608,7 @@ def test_custom_converter_cls():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_increment_delegations_for_hierarchical_process(): def test_increment_delegations_for_hierarchical_process():
scorer = Agent( scorer = Agent(
role="Scorer", role="Scorer",
@@ -622,6 +636,7 @@ def test_increment_delegations_for_hierarchical_process():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_increment_delegations_for_sequential_process(): def test_increment_delegations_for_sequential_process():
manager = Agent( manager = Agent(
role="Manager", role="Manager",
@@ -656,6 +671,7 @@ def test_increment_delegations_for_sequential_process():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_increment_tool_errors(): def test_increment_tool_errors():
from crewai.tools import tool from crewai.tools import tool
@@ -689,6 +705,7 @@ def test_increment_tool_errors():
assert len(increment_tools_errors.mock_calls) > 0 assert len(increment_tools_errors.mock_calls) > 0
@pytest.mark.timeout(60)
def test_task_definition_based_on_dict(): def test_task_definition_based_on_dict():
config = { config = {
"description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.", "description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.",
@@ -702,6 +719,7 @@ def test_task_definition_based_on_dict():
assert task.agent is None assert task.agent is None
@pytest.mark.timeout(60)
def test_conditional_task_definition_based_on_dict(): def test_conditional_task_definition_based_on_dict():
config = { config = {
"description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.", "description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.",
@@ -715,11 +733,12 @@ def test_conditional_task_definition_based_on_dict():
assert task.agent is None assert task.agent is None
@pytest.mark.timeout(60)
def test_interpolate_inputs(): def test_interpolate_inputs():
task = Task( task = Task(
description="Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting.", description="Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting.",
expected_output="Bullet point list of 5 interesting ideas about {topic}.", expected_output="Bullet point list of 5 interesting ideas about {topic}.",
output_file="/tmp/{topic}/output_{date}.txt" output_file="/tmp/{topic}/output_{date}.txt",
) )
task.interpolate_inputs(inputs={"topic": "AI", "date": "2024"}) task.interpolate_inputs(inputs={"topic": "AI", "date": "2024"})
@@ -739,48 +758,44 @@ def test_interpolate_inputs():
assert task.output_file == "/tmp/ML/output_2025.txt" assert task.output_file == "/tmp/ML/output_2025.txt"
@pytest.mark.timeout(60)
def test_interpolate_only(): def test_interpolate_only():
"""Test the interpolate_only method for various scenarios including JSON structure preservation.""" """Test the interpolate_only method for various scenarios including JSON structure preservation."""
task = Task( task = Task(
description="Unused in this test", description="Unused in this test", expected_output="Unused in this test"
expected_output="Unused in this test"
) )
# Test JSON structure preservation # Test JSON structure preservation
json_string = '{"info": "Look at {placeholder}", "nested": {"val": "{nestedVal}"}}' json_string = '{"info": "Look at {placeholder}", "nested": {"val": "{nestedVal}"}}'
result = task.interpolate_only( result = task.interpolate_only(
input_string=json_string, input_string=json_string,
inputs={"placeholder": "the data", "nestedVal": "something else"} inputs={"placeholder": "the data", "nestedVal": "something else"},
) )
assert '"info": "Look at the data"' in result assert '"info": "Look at the data"' in result
assert '"val": "something else"' in result assert '"val": "something else"' in result
assert "{placeholder}" not in result assert "{placeholder}" not in result
assert "{nestedVal}" not in result assert "{nestedVal}" not in result
# Test normal string interpolation # Test normal string interpolation
normal_string = "Hello {name}, welcome to {place}!" normal_string = "Hello {name}, welcome to {place}!"
result = task.interpolate_only( result = task.interpolate_only(
input_string=normal_string, input_string=normal_string, inputs={"name": "John", "place": "CrewAI"}
inputs={"name": "John", "place": "CrewAI"}
) )
assert result == "Hello John, welcome to CrewAI!" assert result == "Hello John, welcome to CrewAI!"
# Test empty string # Test empty string
result = task.interpolate_only( result = task.interpolate_only(input_string="", inputs={"unused": "value"})
input_string="",
inputs={"unused": "value"}
)
assert result == "" assert result == ""
# Test string with no placeholders # Test string with no placeholders
no_placeholders = "Hello, this is a test" no_placeholders = "Hello, this is a test"
result = task.interpolate_only( result = task.interpolate_only(
input_string=no_placeholders, input_string=no_placeholders, inputs={"unused": "value"}
inputs={"unused": "value"}
) )
assert result == no_placeholders assert result == no_placeholders
@pytest.mark.timeout(60)
def test_task_output_str_with_pydantic(): def test_task_output_str_with_pydantic():
from crewai.tasks.output_format import OutputFormat from crewai.tasks.output_format import OutputFormat
@@ -798,6 +813,7 @@ def test_task_output_str_with_pydantic():
assert str(task_output) == str(score_output) assert str(task_output) == str(score_output)
@pytest.mark.timeout(60)
def test_task_output_str_with_json_dict(): def test_task_output_str_with_json_dict():
from crewai.tasks.output_format import OutputFormat from crewai.tasks.output_format import OutputFormat
@@ -812,6 +828,7 @@ def test_task_output_str_with_json_dict():
assert str(task_output) == str(json_dict) assert str(task_output) == str(json_dict)
@pytest.mark.timeout(60)
def test_task_output_str_with_raw(): def test_task_output_str_with_raw():
from crewai.tasks.output_format import OutputFormat from crewai.tasks.output_format import OutputFormat
@@ -826,6 +843,7 @@ def test_task_output_str_with_raw():
assert str(task_output) == raw_output assert str(task_output) == raw_output
@pytest.mark.timeout(60)
def test_task_output_str_with_pydantic_and_json_dict(): def test_task_output_str_with_pydantic_and_json_dict():
from crewai.tasks.output_format import OutputFormat from crewai.tasks.output_format import OutputFormat
@@ -846,6 +864,7 @@ def test_task_output_str_with_pydantic_and_json_dict():
assert str(task_output) == str(score_output) assert str(task_output) == str(score_output)
@pytest.mark.timeout(60)
def test_task_output_str_with_none(): def test_task_output_str_with_none():
from crewai.tasks.output_format import OutputFormat from crewai.tasks.output_format import OutputFormat
@@ -858,6 +877,7 @@ def test_task_output_str_with_none():
assert str(task_output) == "" assert str(task_output) == ""
@pytest.mark.timeout(60)
def test_key(): def test_key():
original_description = "Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting." original_description = "Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting."
original_expected_output = "Bullet point list of 5 interesting ideas about {topic}." original_expected_output = "Bullet point list of 5 interesting ideas about {topic}."
@@ -877,59 +897,69 @@ def test_key():
), "The key should be the hash of the non-interpolated description." ), "The key should be the hash of the non-interpolated description."
@pytest.mark.timeout(60)
def test_output_file_validation(): def test_output_file_validation():
"""Test output file path validation.""" """Test output file path validation."""
# Valid paths # Valid paths
assert Task( assert (
description="Test task", Task(
expected_output="Test output", description="Test task",
output_file="output.txt" expected_output="Test output",
).output_file == "output.txt" output_file="output.txt",
assert Task( ).output_file
description="Test task", == "output.txt"
expected_output="Test output", )
output_file="/tmp/output.txt" assert (
).output_file == "tmp/output.txt" Task(
assert Task( description="Test task",
description="Test task", expected_output="Test output",
expected_output="Test output", output_file="/tmp/output.txt",
output_file="{dir}/output_{date}.txt" ).output_file
).output_file == "{dir}/output_{date}.txt" == "tmp/output.txt"
)
assert (
Task(
description="Test task",
expected_output="Test output",
output_file="{dir}/output_{date}.txt",
).output_file
== "{dir}/output_{date}.txt"
)
# Invalid paths # Invalid paths
with pytest.raises(ValueError, match="Path traversal"): with pytest.raises(ValueError, match="Path traversal"):
Task( Task(
description="Test task", description="Test task",
expected_output="Test output", expected_output="Test output",
output_file="../output.txt" output_file="../output.txt",
) )
with pytest.raises(ValueError, match="Path traversal"): with pytest.raises(ValueError, match="Path traversal"):
Task( Task(
description="Test task", description="Test task",
expected_output="Test output", expected_output="Test output",
output_file="folder/../output.txt" output_file="folder/../output.txt",
) )
with pytest.raises(ValueError, match="Shell special characters"): with pytest.raises(ValueError, match="Shell special characters"):
Task( Task(
description="Test task", description="Test task",
expected_output="Test output", expected_output="Test output",
output_file="output.txt | rm -rf /" output_file="output.txt | rm -rf /",
) )
with pytest.raises(ValueError, match="Shell expansion"): with pytest.raises(ValueError, match="Shell expansion"):
Task( Task(
description="Test task", description="Test task",
expected_output="Test output", expected_output="Test output",
output_file="~/output.txt" output_file="~/output.txt",
) )
with pytest.raises(ValueError, match="Shell expansion"): with pytest.raises(ValueError, match="Shell expansion"):
Task( Task(
description="Test task", description="Test task",
expected_output="Test output", expected_output="Test output",
output_file="$HOME/output.txt" output_file="$HOME/output.txt",
) )
with pytest.raises(ValueError, match="Invalid template variable"): with pytest.raises(ValueError, match="Invalid template variable"):
Task( Task(
description="Test task", description="Test task",
expected_output="Test output", expected_output="Test output",
output_file="{invalid-name}/output.txt" output_file="{invalid-name}/output.txt",
) )
View File
@@ -8,48 +8,50 @@ from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool
class TestAgentTool(BaseAgentTool): class TestAgentTool(BaseAgentTool):
"""Concrete implementation of BaseAgentTool for testing.""" """Concrete implementation of BaseAgentTool for testing."""
def _run(self, *args, **kwargs): def _run(self, *args, **kwargs):
"""Implement required _run method.""" """Implement required _run method."""
return "Test response" return "Test response"
@pytest.mark.parametrize("role_name,should_match", [
('Futel Official Infopoint', True), # exact match @pytest.mark.timeout(60)
(' "Futel Official Infopoint" ', True), # extra quotes and spaces @pytest.mark.parametrize(
('Futel Official Infopoint\n', True), # trailing newline "role_name,should_match",
('"Futel Official Infopoint"', True), # embedded quotes [
(' FUTEL\nOFFICIAL INFOPOINT ', True), # multiple whitespace and newline ("Futel Official Infopoint", True), # exact match
('futel official infopoint', True), # lowercase (' "Futel Official Infopoint" ', True), # extra quotes and spaces
('FUTEL OFFICIAL INFOPOINT', True), # uppercase ("Futel Official Infopoint\n", True), # trailing newline
('Non Existent Agent', False), # non-existent agent ('"Futel Official Infopoint"', True), # embedded quotes
(None, False), # None agent name (" FUTEL\nOFFICIAL INFOPOINT ", True), # multiple whitespace and newline
]) ("futel official infopoint", True), # lowercase
("FUTEL OFFICIAL INFOPOINT", True), # uppercase
("Non Existent Agent", False), # non-existent agent
(None, False), # None agent name
],
)
def test_agent_tool_role_matching(role_name, should_match): def test_agent_tool_role_matching(role_name, should_match):
"""Test that agent tools can match roles regardless of case, whitespace, and special characters.""" """Test that agent tools can match roles regardless of case, whitespace, and special characters."""
# Create test agent # Create test agent
test_agent = Agent( test_agent = Agent(
role='Futel Official Infopoint', role="Futel Official Infopoint",
goal='Answer questions about Futel', goal="Answer questions about Futel",
backstory='Futel Football Club info', backstory="Futel Football Club info",
allow_delegation=False allow_delegation=False,
) )
# Create test agent tool # Create test agent tool
agent_tool = TestAgentTool( agent_tool = TestAgentTool(
name="test_tool", name="test_tool", description="Test tool", agents=[test_agent]
description="Test tool",
agents=[test_agent]
) )
# Test role matching # Test role matching
result = agent_tool._execute( result = agent_tool._execute(agent_name=role_name, task="Test task", context=None)
agent_name=role_name,
task='Test task',
context=None
)
if should_match: if should_match:
assert "coworker mentioned not found" not in result.lower(), \ assert (
f"Should find agent with role name: {role_name}" "coworker mentioned not found" not in result.lower()
), f"Should find agent with role name: {role_name}"
else: else:
assert "coworker mentioned not found" in result.lower(), \ assert (
f"Should not find agent with role name: {role_name}" "coworker mentioned not found" in result.lower()
), f"Should not find agent with role name: {role_name}"
View File
@@ -8,6 +8,7 @@ from crewai.task import Task
from crewai.tasks.task_output import TaskOutput from crewai.tasks.task_output import TaskOutput
@pytest.mark.timeout(60)
def test_task_without_guardrail(): def test_task_without_guardrail():
"""Test that tasks work normally without guardrails (backward compatibility).""" """Test that tasks work normally without guardrails (backward compatibility)."""
agent = Mock() agent = Mock()
@@ -15,18 +16,17 @@ def test_task_without_guardrail():
agent.execute_task.return_value = "test result" agent.execute_task.return_value = "test result"
agent.crew = None agent.crew = None
task = Task( task = Task(description="Test task", expected_output="Output")
description="Test task",
expected_output="Output"
)
result = task.execute_sync(agent=agent) result = task.execute_sync(agent=agent)
assert isinstance(result, TaskOutput) assert isinstance(result, TaskOutput)
assert result.raw == "test result" assert result.raw == "test result"
@pytest.mark.timeout(60)
def test_task_with_successful_guardrail(): def test_task_with_successful_guardrail():
"""Test that successful guardrail validation passes transformed result.""" """Test that successful guardrail validation passes transformed result."""
def guardrail(result: TaskOutput): def guardrail(result: TaskOutput):
return (True, result.raw.upper()) return (True, result.raw.upper())
@@ -35,35 +35,30 @@ def test_task_with_successful_guardrail():
agent.execute_task.return_value = "test result" agent.execute_task.return_value = "test result"
agent.crew = None agent.crew = None
task = Task( task = Task(description="Test task", expected_output="Output", guardrail=guardrail)
description="Test task",
expected_output="Output",
guardrail=guardrail
)
result = task.execute_sync(agent=agent) result = task.execute_sync(agent=agent)
assert isinstance(result, TaskOutput) assert isinstance(result, TaskOutput)
assert result.raw == "TEST RESULT" assert result.raw == "TEST RESULT"
@pytest.mark.timeout(60)
def test_task_with_failing_guardrail(): def test_task_with_failing_guardrail():
"""Test that failing guardrail triggers retry with error context.""" """Test that failing guardrail triggers retry with error context."""
def guardrail(result: TaskOutput): def guardrail(result: TaskOutput):
return (False, "Invalid format") return (False, "Invalid format")
agent = Mock() agent = Mock()
agent.role = "test_agent" agent.role = "test_agent"
agent.execute_task.side_effect = [ agent.execute_task.side_effect = ["bad result", "good result"]
"bad result",
"good result"
]
agent.crew = None agent.crew = None
task = Task( task = Task(
description="Test task", description="Test task",
expected_output="Output", expected_output="Output",
guardrail=guardrail, guardrail=guardrail,
max_retries=1 max_retries=1,
) )
# First execution fails guardrail, second succeeds # First execution fails guardrail, second succeeds
@@ -75,8 +70,10 @@ def test_task_with_failing_guardrail():
assert task.retry_count == 1 assert task.retry_count == 1
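As these guardrail tests illustrate, a guardrail is a plain callable that receives the TaskOutput and returns a (bool, value) tuple: (True, transformed_result) to accept the output, or (False, error_message) to force a retry until max_retries is exhausted. A minimal sketch of a guardrail in that shape (the JSON check itself is illustrative, not from the codebase):

from crewai.tasks.task_output import TaskOutput


def json_only_guardrail(result: TaskOutput):
    # Accept (and pass through) output that at least looks like a JSON object;
    # otherwise return an error message that is fed back into the retry context.
    if result.raw.strip().startswith("{"):
        return (True, result.raw)
    return (False, "Expected JSON, got string")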
@pytest.mark.timeout(60)
def test_task_with_guardrail_retries(): def test_task_with_guardrail_retries():
"""Test that guardrail respects max_retries configuration.""" """Test that guardrail respects max_retries configuration."""
def guardrail(result: TaskOutput): def guardrail(result: TaskOutput):
return (False, "Invalid format") return (False, "Invalid format")
@@ -89,7 +86,7 @@ def test_task_with_guardrail_retries():
description="Test task", description="Test task",
expected_output="Output", expected_output="Output",
guardrail=guardrail, guardrail=guardrail,
max_retries=2 max_retries=2,
) )
with pytest.raises(Exception) as exc_info: with pytest.raises(Exception) as exc_info:
@@ -100,8 +97,10 @@ def test_task_with_guardrail_retries():
assert "Invalid format" in str(exc_info.value) assert "Invalid format" in str(exc_info.value)
@pytest.mark.timeout(60)
def test_guardrail_error_in_context(): def test_guardrail_error_in_context():
"""Test that guardrail error is passed in context for retry.""" """Test that guardrail error is passed in context for retry."""
def guardrail(result: TaskOutput): def guardrail(result: TaskOutput):
return (False, "Expected JSON, got string") return (False, "Expected JSON, got string")
@@ -113,11 +112,12 @@ def test_guardrail_error_in_context():
description="Test task", description="Test task",
expected_output="Output", expected_output="Output",
guardrail=guardrail, guardrail=guardrail,
max_retries=1 max_retries=1,
) )
# Mock execute_task to succeed on second attempt # Mock execute_task to succeed on second attempt
first_call = True first_call = True
def execute_task(task, context, tools): def execute_task(task, context, tools):
nonlocal first_call nonlocal first_call
if first_call: if first_call: