tests should work now

Brandon Hancock
2025-01-02 14:18:11 -05:00
parent 11ff8270ea
commit db61f5dd59
13 changed files with 10 additions and 264 deletions

View File

@@ -13,6 +13,7 @@ dependencies = [
"openai>=1.13.3",
"litellm>=1.44.22",
"instructor>=1.3.3",
"crewai-tools>=0.17.0",
# Text Processing
"pdfplumber>=0.11.4",
@@ -71,7 +72,6 @@ docling = [
[tool.uv]
dev-dependencies = [
"crewai-tools>=0.17.0",
"ruff>=0.8.2",
"mypy>=1.10.0",
"pre-commit>=3.6.0",

View File

@@ -20,7 +20,6 @@ from crewai.utilities import RPMController
from crewai.utilities.events import Emitter
@pytest.mark.timeout(60) # 1 minute timeout
def test_agent_llm_creation_with_env_vars():
# Store original environment variables
original_api_key = os.environ.get("OPENAI_API_KEY")
@@ -64,7 +63,6 @@ def test_agent_llm_creation_with_env_vars():
os.environ["OPENAI_MODEL_NAME"] = original_model_name
@pytest.mark.timeout(60)
def test_agent_creation():
agent = Agent(role="test role", goal="test goal", backstory="test backstory")
@@ -74,14 +72,12 @@ def test_agent_creation():
assert agent.tools == []
@pytest.mark.timeout(60)
def test_agent_default_values():
agent = Agent(role="test role", goal="test goal", backstory="test backstory")
assert agent.llm.model == "gpt-4o-mini"
assert agent.allow_delegation is False
@pytest.mark.timeout(60)
def test_custom_llm():
agent = Agent(
role="test role", goal="test goal", backstory="test backstory", llm="gpt-4"
@@ -89,7 +85,6 @@ def test_custom_llm():
assert agent.llm.model == "gpt-4"
@pytest.mark.timeout(60)
def test_custom_llm_with_langchain():
from langchain_openai import ChatOpenAI
@@ -103,7 +98,6 @@ def test_custom_llm_with_langchain():
assert agent.llm.model == "gpt-4"
@pytest.mark.timeout(60)
def test_custom_llm_temperature_preservation():
from langchain_openai import ChatOpenAI
@@ -121,7 +115,6 @@ def test_custom_llm_temperature_preservation():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_execute_task():
from langchain_openai import ChatOpenAI
@@ -151,7 +144,6 @@ def test_agent_execute_task():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_execution():
agent = Agent(
role="test role",
@@ -171,7 +163,6 @@ def test_agent_execution():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_execution_with_tools():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -203,7 +194,6 @@ def test_agent_execution_with_tools():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_logging_tool_usage():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -238,7 +228,6 @@ def test_logging_tool_usage():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_cache_hitting():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -311,7 +300,6 @@ def test_cache_hitting():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_disabling_cache_for_agent():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -376,7 +364,6 @@ def test_disabling_cache_for_agent():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_execution_with_specific_tools():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -400,7 +387,6 @@ def test_agent_execution_with_specific_tools():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -427,7 +413,6 @@ def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_powered_by_new_o_model_family_that_uses_tool():
@tool
def comapny_customer_data() -> float:
@@ -454,7 +439,6 @@ def test_agent_powered_by_new_o_model_family_that_uses_tool():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_custom_max_iterations():
@tool
def get_final_answer() -> float:
@@ -485,7 +469,6 @@ def test_agent_custom_max_iterations():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_repeated_tool_usage(capsys):
@tool
def get_final_answer() -> float:
@@ -523,7 +506,6 @@ def test_agent_repeated_tool_usage(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
@tool
def get_final_answer(anything: str) -> float:
@@ -560,7 +542,6 @@ def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_moved_on_after_max_iterations():
@tool
def get_final_answer() -> float:
@@ -588,7 +569,6 @@ def test_agent_moved_on_after_max_iterations():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_respect_the_max_rpm_set(capsys):
@tool
def get_final_answer() -> float:
@@ -623,7 +603,6 @@ def test_agent_respect_the_max_rpm_set(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
from unittest.mock import patch
@@ -662,7 +641,6 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_without_max_rpm_respet_crew_rpm(capsys):
from unittest.mock import patch
@@ -717,7 +695,6 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_error_on_parsing_tool(capsys):
from unittest.mock import patch
@@ -761,7 +738,6 @@ def test_agent_error_on_parsing_tool(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_remembers_output_format_after_using_tools_too_many_times():
from unittest.mock import patch
@@ -797,7 +773,6 @@ def test_agent_remembers_output_format_after_using_tools_too_many_times():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_use_specific_tasks_output_as_context(capsys):
agent1 = Agent(role="test role", goal="test goal", backstory="test backstory")
agent2 = Agent(role="test role2", goal="test goal2", backstory="test backstory2")
@@ -825,7 +800,6 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_step_callback():
class StepCallback:
def callback(self, step):
@@ -860,7 +834,6 @@ def test_agent_step_callback():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_function_calling_llm():
llm = "gpt-4o"
@@ -907,7 +880,6 @@ def test_agent_function_calling_llm():
mock_original_tool_calling.assert_called()
@pytest.mark.timeout(60)
def test_agent_count_formatting_error():
from unittest.mock import patch
@@ -928,7 +900,6 @@ def test_agent_count_formatting_error():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
from crewai.tools import BaseTool
@@ -959,7 +930,6 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_tool_usage_information_is_appended_to_agent():
from crewai.tools import BaseTool
@@ -996,7 +966,6 @@ def test_tool_usage_information_is_appended_to_agent():
]
@pytest.mark.timeout(60)
def test_agent_definition_based_on_dict():
config = {
"role": "test role",
@@ -1016,7 +985,6 @@ def test_agent_definition_based_on_dict():
# test for human input
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_human_input():
# Agent configuration
config = {
@@ -1057,7 +1025,6 @@ def test_agent_human_input():
assert output.strip().lower() == "hello" # Final output should be 'Hello'
@pytest.mark.timeout(60)
def test_interpolate_inputs():
agent = Agent(
role="{topic} specialist",
@@ -1076,7 +1043,6 @@ def test_interpolate_inputs():
assert agent.backstory == "I am the master of nothing"
@pytest.mark.timeout(60)
def test_not_using_system_prompt():
agent = Agent(
role="{topic} specialist",
@@ -1090,7 +1056,6 @@ def test_not_using_system_prompt():
assert not agent.agent_executor.prompt.get("system")
@pytest.mark.timeout(60)
def test_using_system_prompt():
agent = Agent(
role="{topic} specialist",
@@ -1103,7 +1068,6 @@ def test_using_system_prompt():
assert agent.agent_executor.prompt.get("system")
@pytest.mark.timeout(60)
def test_system_and_prompt_template():
agent = Agent(
role="{topic} specialist",
@@ -1157,7 +1121,6 @@ Thought:<|eot_id|>
@patch("crewai.agent.CrewTrainingHandler")
@pytest.mark.timeout(60)
def test_agent_training_handler(crew_training_handler):
task_prompt = "What is 1 + 1?"
agent = Agent(
@@ -1180,7 +1143,6 @@ def test_agent_training_handler(crew_training_handler):
@patch("crewai.agent.CrewTrainingHandler")
@pytest.mark.timeout(60)
def test_agent_use_trained_data(crew_training_handler):
task_prompt = "What is 1 + 1?"
agent = Agent(
@@ -1209,7 +1171,6 @@ def test_agent_use_trained_data(crew_training_handler):
)
@pytest.mark.timeout(60)
def test_agent_max_retry_limit():
agent = Agent(
role="test role",
@@ -1263,7 +1224,6 @@ def test_agent_max_retry_limit():
)
@pytest.mark.timeout(60)
def test_agent_with_llm():
agent = Agent(
role="test role",
@@ -1277,7 +1237,6 @@ def test_agent_with_llm():
assert agent.llm.temperature == 0.7
@pytest.mark.timeout(60)
def test_agent_with_custom_stop_words():
stop_words = ["STOP", "END"]
agent = Agent(
@@ -1293,7 +1252,6 @@ def test_agent_with_custom_stop_words():
assert "\nObservation:" in agent.llm.stop
@pytest.mark.timeout(60)
def test_agent_with_callbacks():
def dummy_callback(response):
pass
@@ -1310,7 +1268,6 @@ def test_agent_with_callbacks():
assert agent.llm.callbacks[0] == dummy_callback
@pytest.mark.timeout(60)
def test_agent_with_additional_kwargs():
agent = Agent(
role="test role",
@@ -1334,7 +1291,6 @@ def test_agent_with_additional_kwargs():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_llm_call():
llm = LLM(model="gpt-3.5-turbo")
messages = [{"role": "user", "content": "Say 'Hello, World!'"}]
@@ -1344,7 +1300,6 @@ def test_llm_call():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_llm_call_with_error():
llm = LLM(model="non-existent-model")
messages = [{"role": "user", "content": "This should fail"}]
@@ -1354,7 +1309,6 @@ def test_llm_call_with_error():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_handle_context_length_exceeds_limit():
agent = Agent(
role="test role",
@@ -1399,7 +1353,6 @@ def test_handle_context_length_exceeds_limit():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_handle_context_length_exceeds_limit_cli_no():
agent = Agent(
role="test role",
@@ -1427,7 +1380,6 @@ def test_handle_context_length_exceeds_limit_cli_no():
mock_handle_context.assert_not_called()
@pytest.mark.timeout(60)
def test_agent_with_all_llm_attributes():
agent = Agent(
role="test role",
@@ -1476,7 +1428,6 @@ def test_agent_with_all_llm_attributes():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_llm_call_with_all_attributes():
llm = LLM(
model="gpt-3.5-turbo",
@@ -1494,7 +1445,6 @@ def test_llm_call_with_all_attributes():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_with_ollama_llama3():
agent = Agent(
role="test role",
@@ -1516,7 +1466,6 @@ def test_agent_with_ollama_llama3():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_llm_call_with_ollama_llama3():
llm = LLM(
model="ollama/llama3.2:3b",
@@ -1534,7 +1483,6 @@ def test_llm_call_with_ollama_llama3():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_execute_task_basic():
agent = Agent(
role="test role",
@@ -1554,7 +1502,6 @@ def test_agent_execute_task_basic():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_execute_task_with_context():
agent = Agent(
role="test role",
@@ -1577,7 +1524,6 @@ def test_agent_execute_task_with_context():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_execute_task_with_tool():
@tool
def dummy_tool(query: str) -> str:
@@ -1603,7 +1549,6 @@ def test_agent_execute_task_with_tool():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_execute_task_with_custom_llm():
agent = Agent(
role="test role",
@@ -1625,7 +1570,6 @@ def test_agent_execute_task_with_custom_llm():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_execute_task_with_ollama():
agent = Agent(
role="test role",
@@ -1646,7 +1590,6 @@ def test_agent_execute_task_with_ollama():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_with_knowledge_sources():
# Create a knowledge source with some content
content = "Brandon's favorite color is red and he likes Mexican food."

View File

@@ -15,7 +15,6 @@ def parser():
return p
@pytest.mark.timeout(60)
def test_valid_action_parsing_special_characters(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: what's the temperature in SF?"
result = parser.parse(text)
@@ -24,7 +23,6 @@ def test_valid_action_parsing_special_characters(parser):
assert result.tool_input == "what's the temperature in SF?"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_json_tool_input(parser):
text = """
Thought: Let's find the information
@@ -38,7 +36,6 @@ def test_valid_action_parsing_with_json_tool_input(parser):
assert result.tool_input == expected_tool_input
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_quotes(parser):
text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "temperature in SF"'
result = parser.parse(text)
@@ -47,7 +44,6 @@ def test_valid_action_parsing_with_quotes(parser):
assert result.tool_input == "temperature in SF"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_curly_braces(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: {temperature in SF}"
result = parser.parse(text)
@@ -56,7 +52,6 @@ def test_valid_action_parsing_with_curly_braces(parser):
assert result.tool_input == "{temperature in SF}"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_angle_brackets(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: <temperature in SF>"
result = parser.parse(text)
@@ -65,7 +60,6 @@ def test_valid_action_parsing_with_angle_brackets(parser):
assert result.tool_input == "<temperature in SF>"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_parentheses(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: (temperature in SF)"
result = parser.parse(text)
@@ -74,7 +68,6 @@ def test_valid_action_parsing_with_parentheses(parser):
assert result.tool_input == "(temperature in SF)"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_mixed_brackets(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: [temperature in {SF}]"
result = parser.parse(text)
@@ -83,7 +76,6 @@ def test_valid_action_parsing_with_mixed_brackets(parser):
assert result.tool_input == "[temperature in {SF}]"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_nested_quotes(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what's the temperature in 'SF'?\""
result = parser.parse(text)
@@ -92,7 +84,6 @@ def test_valid_action_parsing_with_nested_quotes(parser):
assert result.tool_input == "what's the temperature in 'SF'?"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_incomplete_json(parser):
text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: {"query": "temperature in SF"'
result = parser.parse(text)
@@ -101,7 +92,6 @@ def test_valid_action_parsing_with_incomplete_json(parser):
assert result.tool_input == '{"query": "temperature in SF"}'
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_special_characters(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is the temperature in SF? @$%^&*"
result = parser.parse(text)
@@ -110,7 +100,6 @@ def test_valid_action_parsing_with_special_characters(parser):
assert result.tool_input == "what is the temperature in SF? @$%^&*"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_combination(parser):
text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "[what is the temperature in SF?]"'
result = parser.parse(text)
@@ -119,7 +108,6 @@ def test_valid_action_parsing_with_combination(parser):
assert result.tool_input == "[what is the temperature in SF?]"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_mixed_quotes(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what's the temperature in SF?\""
result = parser.parse(text)
@@ -128,7 +116,6 @@ def test_valid_action_parsing_with_mixed_quotes(parser):
assert result.tool_input == "what's the temperature in SF?"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_newlines(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is\nthe temperature in SF?"
result = parser.parse(text)
@@ -137,7 +124,6 @@ def test_valid_action_parsing_with_newlines(parser):
assert result.tool_input == "what is\nthe temperature in SF?"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_escaped_characters(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is the temperature in SF? \\n"
result = parser.parse(text)
@@ -146,7 +132,6 @@ def test_valid_action_parsing_with_escaped_characters(parser):
assert result.tool_input == "what is the temperature in SF? \\n"
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_json_string(parser):
text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: {"query": "temperature in SF"}'
result = parser.parse(text)
@@ -155,7 +140,6 @@ def test_valid_action_parsing_with_json_string(parser):
assert result.tool_input == '{"query": "temperature in SF"}'
@pytest.mark.timeout(60)
def test_valid_action_parsing_with_unbalanced_quotes(parser):
text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what is the temperature in SF?"
result = parser.parse(text)
@@ -164,70 +148,60 @@ def test_valid_action_parsing_with_unbalanced_quotes(parser):
assert result.tool_input == "what is the temperature in SF?"
@pytest.mark.timeout(60)
def test_clean_action_no_formatting(parser):
action = "Ask question to senior researcher"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_leading_asterisks(parser):
action = "** Ask question to senior researcher"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_trailing_asterisks(parser):
action = "Ask question to senior researcher **"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_leading_and_trailing_asterisks(parser):
action = "** Ask question to senior researcher **"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_multiple_leading_asterisks(parser):
action = "**** Ask question to senior researcher"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_multiple_trailing_asterisks(parser):
action = "Ask question to senior researcher ****"
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_spaces_and_asterisks(parser):
action = " ** Ask question to senior researcher ** "
cleaned_action = parser._clean_action(action)
assert cleaned_action == "Ask question to senior researcher"
@pytest.mark.timeout(60)
def test_clean_action_with_only_asterisks(parser):
action = "****"
cleaned_action = parser._clean_action(action)
assert cleaned_action == ""
@pytest.mark.timeout(60)
def test_clean_action_with_empty_string(parser):
action = ""
cleaned_action = parser._clean_action(action)
assert cleaned_action == ""
@pytest.mark.timeout(60)
def test_valid_final_answer_parsing(parser):
text = (
"Thought: I found the information\nFinal Answer: The temperature is 100 degrees"
@@ -237,7 +211,6 @@ def test_valid_final_answer_parsing(parser):
assert result.output == "The temperature is 100 degrees"
@pytest.mark.timeout(60)
def test_missing_action_error(parser):
text = "Thought: Let's find the temperature\nAction Input: what is the temperature in SF?"
with pytest.raises(OutputParserException) as exc_info:
@@ -247,7 +220,6 @@ def test_missing_action_error(parser):
)
@pytest.mark.timeout(60)
def test_missing_action_input_error(parser):
text = "Thought: Let's find the temperature\nAction: search"
with pytest.raises(OutputParserException) as exc_info:
@@ -255,7 +227,6 @@ def test_missing_action_input_error(parser):
assert "I missed the 'Action Input:' after 'Action:'." in str(exc_info.value)
@pytest.mark.timeout(60)
def test_action_and_final_answer_error(parser):
text = "Thought: I found the information\nAction: search\nAction Input: what is the temperature in SF?\nFinal Answer: The temperature is 100 degrees"
with pytest.raises(OutputParserException) as exc_info:
@@ -263,7 +234,6 @@ def test_action_and_final_answer_error(parser):
assert "both perform Action and give a Final Answer" in str(exc_info.value)
@pytest.mark.timeout(60)
def test_safe_repair_json(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": Senior Researcher'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -271,14 +241,12 @@ def test_safe_repair_json(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_unrepairable(parser):
invalid_json = "{invalid_json"
result = parser._safe_repair_json(invalid_json)
assert result == invalid_json # Should return the original if unrepairable
@pytest.mark.timeout(60)
def test_safe_repair_json_missing_quotes(parser):
invalid_json = (
'{task: "Research XAI", context: "Explainable AI", coworker: Senior Researcher}'
@@ -288,7 +256,6 @@ def test_safe_repair_json_missing_quotes(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_unclosed_brackets(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -296,7 +263,6 @@ def test_safe_repair_json_unclosed_brackets(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_extra_commas(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher",}'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -304,7 +270,6 @@ def test_safe_repair_json_extra_commas(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_trailing_commas(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher",}'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -312,7 +277,6 @@ def test_safe_repair_json_trailing_commas(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_single_quotes(parser):
invalid_json = "{'task': 'Research XAI', 'context': 'Explainable AI', 'coworker': 'Senior Researcher'}"
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -320,7 +284,6 @@ def test_safe_repair_json_single_quotes(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_mixed_quotes(parser):
invalid_json = "{'task': \"Research XAI\", 'context': \"Explainable AI\", 'coworker': 'Senior Researcher'}"
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -328,7 +291,6 @@ def test_safe_repair_json_mixed_quotes(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_unescaped_characters(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher\n"}'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -336,7 +298,6 @@ def test_safe_repair_json_unescaped_characters(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_missing_colon(parser):
invalid_json = '{"task" "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -344,7 +305,6 @@ def test_safe_repair_json_missing_colon(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_missing_comma(parser):
invalid_json = '{"task": "Research XAI" "context": "Explainable AI", "coworker": "Senior Researcher"}'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -352,7 +312,6 @@ def test_safe_repair_json_missing_comma(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_unexpected_trailing_characters(parser):
invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"} random text'
expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
@@ -360,7 +319,6 @@ def test_safe_repair_json_unexpected_trailing_characters(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_safe_repair_json_special_characters_key(parser):
invalid_json = '{"task!@#": "Research XAI", "context$%^": "Explainable AI", "coworker&*()": "Senior Researcher"}'
expected_repaired_json = '{"task!@#": "Research XAI", "context$%^": "Explainable AI", "coworker&*()": "Senior Researcher"}'
@@ -368,7 +326,6 @@ def test_safe_repair_json_special_characters_key(parser):
assert result == expected_repaired_json
@pytest.mark.timeout(60)
def test_parsing_with_whitespace(parser):
text = " Thought: Let's find the temperature \n Action: search \n Action Input: what is the temperature in SF? "
result = parser.parse(text)
@@ -377,7 +334,6 @@ def test_parsing_with_whitespace(parser):
assert result.tool_input == "what is the temperature in SF?"
@pytest.mark.timeout(60)
def test_parsing_with_special_characters(parser):
text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "what is the temperature in SF?"'
result = parser.parse(text)
@@ -386,7 +342,6 @@ def test_parsing_with_special_characters(parser):
assert result.tool_input == "what is the temperature in SF?"
@pytest.mark.timeout(60)
def test_integration_valid_and_invalid(parser):
text = """
Thought: Let's find the temperature

View File

@@ -11,7 +11,6 @@ class TestAuthenticationCommand(unittest.TestCase):
def setUp(self):
self.auth_command = AuthenticationCommand()
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.main.requests.post")
def test_get_device_code(self, mock_post):
mock_response = MagicMock()
@@ -32,7 +31,6 @@ class TestAuthenticationCommand(unittest.TestCase):
)
self.assertEqual(device_code_data["interval"], 5)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.main.console.print")
@patch("crewai.cli.authentication.main.webbrowser.open")
def test_display_auth_instructions(self, mock_open, mock_print):
@@ -47,7 +45,6 @@ class TestAuthenticationCommand(unittest.TestCase):
mock_print.assert_any_call("2. Enter the following code: ", "ABCDEF")
mock_open.assert_called_once_with("https://example.com")
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.main.ToolCommand")
@patch("crewai.cli.authentication.main.requests.post")
@patch("crewai.cli.authentication.main.validate_token")
@@ -73,7 +70,6 @@ class TestAuthenticationCommand(unittest.TestCase):
"\n[bold green]Welcome to CrewAI Enterprise![/bold green]\n"
)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.main.requests.post")
@patch("crewai.cli.authentication.main.console.print")
def test_poll_for_token_error(self, mock_print, mock_post):
@@ -90,7 +86,6 @@ class TestAuthenticationCommand(unittest.TestCase):
mock_print.assert_not_called()
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.main.requests.post")
@patch("crewai.cli.authentication.main.console.print")
def test_poll_for_token_timeout(self, mock_print, mock_post):

View File

@@ -10,7 +10,6 @@ from crewai.cli.authentication.utils import TokenManager, validate_token
class TestValidateToken(unittest.TestCase):
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.AsymmetricSignatureVerifier")
@patch("crewai.cli.authentication.utils.TokenVerifier")
def test_validate_token(self, mock_token_verifier, mock_asymmetric_verifier):
@@ -34,7 +33,6 @@ class TestTokenManager(unittest.TestCase):
def setUp(self):
self.token_manager = TokenManager()
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.read_secure_file")
@patch("crewai.cli.authentication.utils.TokenManager.save_secure_file")
@patch("crewai.cli.authentication.utils.TokenManager._get_or_create_key")
@@ -47,7 +45,6 @@ class TestTokenManager(unittest.TestCase):
self.assertEqual(result, mock_key)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.Fernet.generate_key")
@patch("crewai.cli.authentication.utils.TokenManager.read_secure_file")
@patch("crewai.cli.authentication.utils.TokenManager.save_secure_file")
@@ -63,7 +60,6 @@ class TestTokenManager(unittest.TestCase):
mock_generate.assert_called_once()
mock_save.assert_called_once_with("secret.key", mock_key)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.save_secure_file")
def test_save_tokens(self, mock_save):
access_token = "test_token"
@@ -84,7 +80,6 @@ class TestTokenManager(unittest.TestCase):
delta=timedelta(seconds=1),
)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.read_secure_file")
def test_get_token_valid(self, mock_read):
access_token = "test_token"
@@ -97,7 +92,6 @@ class TestTokenManager(unittest.TestCase):
self.assertEqual(result, access_token)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.read_secure_file")
def test_get_token_expired(self, mock_read):
access_token = "test_token"
@@ -110,7 +104,6 @@ class TestTokenManager(unittest.TestCase):
self.assertIsNone(result)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.get_secure_storage_path")
@patch("builtins.open", new_callable=unittest.mock.mock_open)
@patch("crewai.cli.authentication.utils.os.chmod")
@@ -127,7 +120,6 @@ class TestTokenManager(unittest.TestCase):
mock_open().write.assert_called_once_with(content)
mock_chmod.assert_called_once_with(mock_path.__truediv__.return_value, 0o600)
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.get_secure_storage_path")
@patch(
"builtins.open", new_callable=unittest.mock.mock_open, read_data=b"test_content"
@@ -144,7 +136,6 @@ class TestTokenManager(unittest.TestCase):
mock_path.__truediv__.assert_called_once_with(filename)
mock_open.assert_called_once_with(mock_path.__truediv__.return_value, "rb")
@pytest.mark.timeout(60)
@patch("crewai.cli.authentication.utils.TokenManager.get_secure_storage_path")
def test_read_secure_file_not_exists(self, mock_get_path):
mock_path = MagicMock()

View File

@@ -12,7 +12,6 @@ from crewai.cli.utils import parse_toml
class TestDeployCommand(unittest.TestCase):
@pytest.mark.timeout(60)
@patch("crewai.cli.command.get_auth_token")
@patch("crewai.cli.deploy.main.get_project_name")
@patch("crewai.cli.command.PlusAPI")
@@ -27,12 +26,10 @@ class TestDeployCommand(unittest.TestCase):
self.deploy_command = DeployCommand()
self.mock_client = self.deploy_command.plus_api_client
@pytest.mark.timeout(60)
def test_init_success(self):
self.assertEqual(self.deploy_command.project_name, "test_project")
self.mock_plus_api.assert_called_once_with(api_key="test_token")
@pytest.mark.timeout(60)
@patch("crewai.cli.command.get_auth_token")
def test_init_failure(self, mock_get_auth_token):
mock_get_auth_token.side_effect = Exception("Auth failed")
@@ -40,7 +37,6 @@ class TestDeployCommand(unittest.TestCase):
with self.assertRaises(SystemExit):
DeployCommand()
@pytest.mark.timeout(60)
def test_validate_response_successful_response(self):
mock_response = Mock(spec=requests.Response)
mock_response.json.return_value = {"message": "Success"}
@@ -51,7 +47,6 @@ class TestDeployCommand(unittest.TestCase):
self.deploy_command._validate_response(mock_response)
assert fake_out.getvalue() == ""
@pytest.mark.timeout(60)
def test_validate_response_json_decode_error(self):
mock_response = Mock(spec=requests.Response)
mock_response.json.side_effect = JSONDecodeError("Decode error", "", 0)
@@ -69,7 +64,6 @@ class TestDeployCommand(unittest.TestCase):
assert "Status Code: 500" in output
assert "Response:\nb'Invalid JSON'" in output
@pytest.mark.timeout(60)
def test_validate_response_422_error(self):
mock_response = Mock(spec=requests.Response)
mock_response.json.return_value = {
@@ -90,7 +84,6 @@ class TestDeployCommand(unittest.TestCase):
assert "Field1 Error message 1" in output
assert "Field2 Error message 2" in output
@pytest.mark.timeout(60)
def test_validate_response_other_error(self):
mock_response = Mock(spec=requests.Response)
mock_response.json.return_value = {"error": "Something went wrong"}
@@ -104,13 +97,11 @@ class TestDeployCommand(unittest.TestCase):
assert "Request to Enterprise API failed. Details:" in output
assert "Details:\nSomething went wrong" in output
@pytest.mark.timeout(60)
def test_standard_no_param_error_message(self):
with patch("sys.stdout", new=StringIO()) as fake_out:
self.deploy_command._standard_no_param_error_message()
self.assertIn("No UUID provided", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_display_deployment_info(self):
with patch("sys.stdout", new=StringIO()) as fake_out:
self.deploy_command._display_deployment_info(
@@ -120,7 +111,6 @@ class TestDeployCommand(unittest.TestCase):
self.assertIn("test-uuid", fake_out.getvalue())
self.assertIn("deployed", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_display_logs(self):
with patch("sys.stdout", new=StringIO()) as fake_out:
self.deploy_command._display_logs(
@@ -128,7 +118,6 @@ class TestDeployCommand(unittest.TestCase):
)
self.assertIn("2023-01-01 - INFO: Test log", fake_out.getvalue())
@pytest.mark.timeout(60)
@patch("crewai.cli.deploy.main.DeployCommand._display_deployment_info")
def test_deploy_with_uuid(self, mock_display):
mock_response = MagicMock()
@@ -141,7 +130,6 @@ class TestDeployCommand(unittest.TestCase):
self.mock_client.deploy_by_uuid.assert_called_once_with("test-uuid")
mock_display.assert_called_once_with({"uuid": "test-uuid"})
@pytest.mark.timeout(60)
@patch("crewai.cli.deploy.main.DeployCommand._display_deployment_info")
def test_deploy_with_project_name(self, mock_display):
mock_response = MagicMock()
@@ -154,7 +142,6 @@ class TestDeployCommand(unittest.TestCase):
self.mock_client.deploy_by_name.assert_called_once_with("test_project")
mock_display.assert_called_once_with({"uuid": "test-uuid"})
@pytest.mark.timeout(60)
@patch("crewai.cli.deploy.main.fetch_and_json_env_file")
@patch("crewai.cli.deploy.main.git.Repository.origin_url")
@patch("builtins.input")
@@ -173,7 +160,6 @@ class TestDeployCommand(unittest.TestCase):
self.assertIn("Deployment created successfully!", fake_out.getvalue())
self.assertIn("new-uuid", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_list_crews(self):
mock_response = MagicMock()
mock_response.status_code = 200
@@ -188,7 +174,6 @@ class TestDeployCommand(unittest.TestCase):
self.assertIn("Crew1 (uuid1) active", fake_out.getvalue())
self.assertIn("Crew2 (uuid2) inactive", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_get_crew_status(self):
mock_response = MagicMock()
mock_response.status_code = 200
@@ -200,7 +185,6 @@ class TestDeployCommand(unittest.TestCase):
self.assertIn("TestCrew", fake_out.getvalue())
self.assertIn("active", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_get_crew_logs(self):
mock_response = MagicMock()
mock_response.status_code = 200
@@ -215,7 +199,6 @@ class TestDeployCommand(unittest.TestCase):
self.assertIn("2023-01-01 - INFO: Log1", fake_out.getvalue())
self.assertIn("2023-01-02 - ERROR: Log2", fake_out.getvalue())
@pytest.mark.timeout(60)
def test_remove_crew(self):
mock_response = MagicMock()
mock_response.status_code = 204
@@ -227,7 +210,6 @@ class TestDeployCommand(unittest.TestCase):
"Crew 'test_project' removed successfully", fake_out.getvalue()
)
@pytest.mark.timeout(60)
@unittest.skipIf(sys.version_info < (3, 11), "Requires Python 3.11+")
def test_parse_toml_python_311_plus(self):
toml_content = """
@@ -242,7 +224,6 @@ class TestDeployCommand(unittest.TestCase):
parsed = parse_toml(toml_content)
self.assertEqual(parsed["tool"]["poetry"]["name"], "test_project")
@pytest.mark.timeout(60)
@patch(
"builtins.open",
new_callable=unittest.mock.mock_open,
@@ -261,7 +242,6 @@ class TestDeployCommand(unittest.TestCase):
print("project_name", project_name)
self.assertEqual(project_name, "test_project")
@pytest.mark.timeout(60)
@unittest.skipIf(sys.version_info < (3, 11), "Requires Python 3.11+")
@patch(
"builtins.open",
@@ -280,7 +260,6 @@ class TestDeployCommand(unittest.TestCase):
project_name = get_project_name()
self.assertEqual(project_name, "test_project")
@pytest.mark.timeout(60)
def test_get_crewai_version(self):
from crewai.cli.version import get_crewai_version

View File

@@ -24,7 +24,6 @@ def in_temp_dir():
os.chdir(original_dir)
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.subprocess.run")
def test_create_success(mock_subprocess):
with in_temp_dir():
@@ -55,7 +54,6 @@ def test_create_success(mock_subprocess):
assert "Creating custom tool test_tool..." in output
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.subprocess.run")
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
def test_install_success(mock_get, mock_subprocess_run):
@@ -92,7 +90,6 @@ def test_install_success(mock_get, mock_subprocess_run):
assert "Successfully installed sample-tool" in output
@pytest.mark.timeout(60)
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
def test_install_tool_not_found(mock_get):
mock_get_response = MagicMock()
@@ -112,7 +109,6 @@ def test_install_tool_not_found(mock_get):
assert "No tool found with this name" in output
@pytest.mark.timeout(60)
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
def test_install_api_error(mock_get):
mock_get_response = MagicMock()
@@ -132,7 +128,6 @@ def test_install_api_error(mock_get):
assert "Failed to get tool details" in output
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.git.Repository.is_synced", return_value=False)
def test_publish_when_not_in_sync(mock_is_synced):
with patch("sys.stdout", new=StringIO()) as fake_out, raises(SystemExit):
@@ -142,7 +137,6 @@ def test_publish_when_not_in_sync(mock_is_synced):
assert "Local changes need to be resolved before publishing" in fake_out.getvalue()
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
@patch("crewai.cli.tools.main.get_project_version", return_value="1.0.0")
@patch("crewai.cli.tools.main.get_project_description", return_value="A sample tool")
@@ -191,7 +185,6 @@ def test_publish_when_not_in_sync_and_force(
)
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
@patch("crewai.cli.tools.main.get_project_version", return_value="1.0.0")
@patch("crewai.cli.tools.main.get_project_description", return_value="A sample tool")
@@ -240,7 +233,6 @@ def test_publish_success(
)
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
@patch("crewai.cli.tools.main.get_project_version", return_value="1.0.0")
@patch("crewai.cli.tools.main.get_project_description", return_value="A sample tool")
@@ -280,7 +272,6 @@ def test_publish_failure(
assert "Name is already taken" in output
@pytest.mark.timeout(60)
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
@patch("crewai.cli.tools.main.get_project_version", return_value="1.0.0")
@patch("crewai.cli.tools.main.get_project_description", return_value="A sample tool")

View File

@@ -47,7 +47,6 @@ writer = Agent(
)
@pytest.mark.timeout(60)
def test_crew_config_conditional_requirement():
with pytest.raises(ValueError):
Crew(process=Process.sequential)
@@ -95,7 +94,6 @@ def test_crew_config_conditional_requirement():
]
@pytest.mark.timeout(60)
def test_async_task_cannot_include_sequential_async_tasks_in_context():
task1 = Task(
description="Task 1",
@@ -144,7 +142,6 @@ def test_async_task_cannot_include_sequential_async_tasks_in_context():
pytest.fail("Unexpected ValidationError raised")
@pytest.mark.timeout(60)
def test_context_no_future_tasks():
task2 = Task(
description="Task 2",
@@ -177,7 +174,6 @@ def test_context_no_future_tasks():
Crew(tasks=[task1, task2, task3, task4], agents=[researcher, writer])
@pytest.mark.timeout(60)
def test_crew_config_with_wrong_keys():
no_tasks_config = json.dumps(
{
@@ -209,7 +205,6 @@ def test_crew_config_with_wrong_keys():
Crew(process=Process.sequential, config=no_agents_config)
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_creation():
tasks = [
@@ -242,7 +237,6 @@ def test_crew_creation():
assert result.raw == expected_string_output
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_sync_task_execution():
from unittest.mock import patch
@@ -284,7 +278,6 @@ def test_sync_task_execution():
assert mock_execute_sync.call_count == len(tasks)
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_hierarchical_process():
task = Task(
@@ -307,7 +300,6 @@ def test_hierarchical_process():
)
@pytest.mark.timeout(60)
def test_manager_llm_requirement_for_hierarchical_process():
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
@@ -322,7 +314,6 @@ def test_manager_llm_requirement_for_hierarchical_process():
)
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_manager_agent_delegating_to_assigned_task_agent():
"""
@@ -375,7 +366,6 @@ def test_manager_agent_delegating_to_assigned_task_agent():
)
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_manager_agent_delegating_to_all_agents():
"""
@@ -409,7 +399,6 @@ def test_manager_agent_delegating_to_all_agents():
)
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_manager_agent_delegates_with_varied_role_cases():
"""
@@ -487,7 +476,6 @@ def test_manager_agent_delegates_with_varied_role_cases():
)
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents():
tasks = [
@@ -512,7 +500,6 @@ def test_crew_with_delegating_agents():
)
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents_should_not_override_task_tools():
from typing import Type
@@ -575,7 +562,6 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():
), "Delegation tool should be present"
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents_should_not_override_agent_tools():
from typing import Type
@@ -640,7 +626,6 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
), "Delegation tool should be present"
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_tools_override_agent_tools():
from typing import Type
@@ -696,7 +681,6 @@ def test_task_tools_override_agent_tools():
assert isinstance(new_researcher.tools[0], TestTool)
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_tools_override_agent_tools_with_allow_delegation():
"""
@@ -781,7 +765,6 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_crew_verbose_output(capsys):
tasks = [
Task(
@@ -828,7 +811,6 @@ def test_crew_verbose_output(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_cache_hitting_between_agents():
from unittest.mock import call, patch
@@ -872,7 +854,6 @@ def test_cache_hitting_between_agents():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_api_calls_throttling(capsys):
from unittest.mock import patch
@@ -912,7 +893,6 @@ def test_api_calls_throttling(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_crew_kickoff_usage_metrics():
inputs = [
{"topic": "dog"},
@@ -945,7 +925,6 @@ def test_crew_kickoff_usage_metrics():
assert result.token_usage.cached_prompt_tokens == 0
@pytest.mark.timeout(60)
def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
agent = Agent(
role="test role",
@@ -966,7 +945,6 @@ def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
assert agent._rpm_controller is None
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_sequential_async_task_execution_completion():
list_ideas = Task(
@@ -999,7 +977,6 @@ def test_sequential_async_task_execution_completion():
)
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_single_task_with_async_execution():
researcher_agent = Agent(
@@ -1028,7 +1005,6 @@ def test_single_task_with_async_execution():
)
@pytest.mark.timeout(60)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_three_task_with_async_execution():
researcher_agent = Agent(
@@ -1074,7 +1050,6 @@ def test_three_task_with_async_execution():
)
@pytest.mark.timeout(60)
@pytest.mark.asyncio
async def test_crew_async_kickoff():
inputs = [
@@ -1122,7 +1097,6 @@ async def test_crew_async_kickoff():
assert result[0].token_usage.successful_requests > 0 # type: ignore
@pytest.mark.timeout(60)
@pytest.mark.asyncio
async def test_async_task_execution_call_count():
from unittest.mock import MagicMock, patch
@@ -1179,7 +1153,6 @@ async def test_async_task_execution_call_count():
assert mock_execute_sync.call_count == 1
@pytest.mark.timeout(60)
def test_kickoff_for_each_single_input():
"""Tests if kickoff_for_each works with a single input."""
@@ -1203,7 +1176,6 @@ def test_kickoff_for_each_single_input():
assert len(results) == 1
@pytest.mark.timeout(60)
def test_kickoff_for_each_multiple_inputs():
"""Tests if kickoff_for_each works with multiple inputs."""
@@ -1231,7 +1203,6 @@ def test_kickoff_for_each_multiple_inputs():
assert len(results) == len(inputs)
@pytest.mark.timeout(60)
def test_kickoff_for_each_empty_input():
"""Tests if kickoff_for_each handles an empty input list."""
agent = Agent(
@@ -1251,7 +1222,6 @@ def test_kickoff_for_each_empty_input():
assert results == []
@pytest.mark.timeout(60)
def test_kickoff_for_each_invalid_input():
"""Tests if kickoff_for_each raises TypeError for invalid input types."""
@@ -1274,7 +1244,6 @@ def test_kickoff_for_each_invalid_input():
crew.kickoff_for_each("invalid input")
@pytest.mark.timeout(60)
def test_kickoff_for_each_error_handling():
"""Tests error handling in kickoff_for_each when kickoff raises an error."""
from unittest.mock import patch
@@ -1311,7 +1280,6 @@ def test_kickoff_for_each_error_handling():
crew.kickoff_for_each(inputs=inputs)
@pytest.mark.timeout(60)
@pytest.mark.asyncio
async def test_kickoff_async_basic_functionality_and_output():
"""Tests the basic functionality and output of kickoff_async."""
@@ -1346,7 +1314,6 @@ async def test_kickoff_async_basic_functionality_and_output():
mock_kickoff.assert_called_once_with(inputs)
@pytest.mark.timeout(60)
@pytest.mark.asyncio
async def test_async_kickoff_for_each_async_basic_functionality_and_output():
"""Tests the basic functionality and output of kickoff_for_each_async."""
@@ -1393,7 +1360,6 @@ async def test_async_kickoff_for_each_async_basic_functionality_and_output():
mock_kickoff_async.assert_any_call(inputs=input_data)
@pytest.mark.timeout(60)
@pytest.mark.asyncio
async def test_async_kickoff_for_each_async_empty_input():
"""Tests if akickoff_for_each_async handles an empty input list."""
@@ -1423,7 +1389,6 @@ async def test_async_kickoff_for_each_async_empty_input():
assert results == [], "Result should be an empty list when input is empty"
@pytest.mark.timeout(60)
def test_set_agents_step_callback():
from unittest.mock import patch
@@ -1454,7 +1419,6 @@ def test_set_agents_step_callback():
assert researcher_agent.step_callback is not None
@pytest.mark.timeout(60)
def test_dont_set_agents_step_callback_if_already_set():
from unittest.mock import patch
@@ -1494,7 +1458,6 @@ def test_dont_set_agents_step_callback_if_already_set():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_crew_function_calling_llm():
from unittest.mock import patch
@@ -1532,7 +1495,6 @@ def test_crew_function_calling_llm():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_task_with_no_arguments():
from crewai.tools import tool
@@ -1601,7 +1563,6 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_delegation_is_not_enabled_if_there_are_only_one_agent():
researcher = Agent(
role="Researcher",
@@ -1623,7 +1584,6 @@ def test_delegation_is_not_enabled_if_there_are_only_one_agent():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agents_do_not_get_delegation_tools_with_there_is_only_one_agent():
agent = Agent(
role="Researcher",
@@ -1642,7 +1602,6 @@ def test_agents_do_not_get_delegation_tools_with_there_is_only_one_agent():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_sequential_crew_creation_tasks_without_agents():
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
@@ -1666,7 +1625,6 @@ def test_sequential_crew_creation_tasks_without_agents():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_agent_usage_metrics_are_captured_for_hierarchical_process():
agent = Agent(
role="Researcher",
@@ -1694,7 +1652,6 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_hierarchical_crew_creation_tasks_with_agents():
"""
Agents are not required for tasks in a hierarchical process but sometimes they are still added
@@ -1748,7 +1705,6 @@ def test_hierarchical_crew_creation_tasks_with_agents():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_hierarchical_crew_creation_tasks_with_async_execution():
"""
Tests that async tasks in hierarchical crews are handled correctly with proper delegation tools
@@ -1806,7 +1762,6 @@ def test_hierarchical_crew_creation_tasks_with_async_execution():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_hierarchical_crew_creation_tasks_with_sync_last():
"""
Agents are not required for tasks in a hierarchical process but sometimes they are still added
@@ -1840,7 +1795,6 @@ def test_hierarchical_crew_creation_tasks_with_sync_last():
)
@pytest.mark.timeout(60)
def test_crew_inputs_interpolate_both_agents_and_tasks():
agent = Agent(
role="{topic} Researcher",
@@ -1865,7 +1819,6 @@ def test_crew_inputs_interpolate_both_agents_and_tasks():
assert crew.agents[0].backstory == "You have a lot of experience with AI."
@pytest.mark.timeout(60)
def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
from unittest.mock import patch
@@ -1897,7 +1850,6 @@ def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_crew_does_not_interpolate_without_inputs():
from unittest.mock import patch
@@ -1922,7 +1874,6 @@ def test_crew_does_not_interpolate_without_inputs():
interpolate_task_inputs.assert_not_called()
@pytest.mark.timeout(60)
def test_task_callback_on_crew():
from unittest.mock import MagicMock, patch
@@ -1960,7 +1911,6 @@ def test_task_callback_on_crew():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_tools_with_custom_caching():
from unittest.mock import patch
@@ -2033,7 +1983,6 @@ def test_tools_with_custom_caching():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_using_contextual_memory():
from unittest.mock import patch
@@ -2062,7 +2011,6 @@ def test_using_contextual_memory():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_disabled_memory_using_contextual_memory():
from unittest.mock import patch
@@ -2091,7 +2039,6 @@ def test_disabled_memory_using_contextual_memory():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_crew_log_file_output(tmp_path):
test_file = tmp_path / "logs.txt"
tasks = [
@@ -2108,7 +2055,6 @@ def test_crew_log_file_output(tmp_path):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_crew_output_file_end_to_end(tmp_path):
"""Test output file functionality in a full crew context."""
# Create an agent
@@ -2141,7 +2087,6 @@ def test_crew_output_file_end_to_end(tmp_path):
assert expected_file.exists(), f"Output file {expected_file} was not created"
@pytest.mark.timeout(60)
def test_crew_output_file_validation_failures():
"""Test output file validation failures in a crew context."""
agent = Agent(
@@ -2192,7 +2137,6 @@ def test_crew_output_file_validation_failures():
Crew(agents=[agent], tasks=[task]).kickoff()
@pytest.mark.timeout(60)
def test_manager_agent():
from unittest.mock import patch
@@ -2231,7 +2175,6 @@ def test_manager_agent():
mock_execute_sync.assert_called()
@pytest.mark.timeout(60)
def test_manager_agent_in_agents_raises_exception():
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
@@ -2254,7 +2197,6 @@ def test_manager_agent_in_agents_raises_exception():
)
@pytest.mark.timeout(60)
def test_manager_agent_with_tools_raises_exception():
from crewai.tools import tool
@@ -2291,7 +2233,6 @@ def test_manager_agent_with_tools_raises_exception():
@patch("crewai.crew.CrewTrainingHandler")
@patch("crewai.crew.TaskEvaluator")
@patch("crewai.crew.Crew.copy")
@pytest.mark.timeout(60)
def test_crew_train_success(
copy_mock, task_evaluator, crew_training_handler, kickoff_mock
):
@@ -2355,7 +2296,6 @@ def test_crew_train_success(
)
@pytest.mark.timeout(60)
def test_crew_train_error():
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article",
@@ -2406,7 +2346,6 @@ def test__setup_for_training():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_replay_feature():
list_ideas = Task(
description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.",
@@ -2444,7 +2383,6 @@ def test_replay_feature():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_crew_replay_error():
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article",
@@ -2463,7 +2401,6 @@ def test_crew_replay_error():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_crew_task_db_init():
agent = Agent(
role="Content Writer",
@@ -2502,7 +2439,6 @@ def test_crew_task_db_init():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_replay_task_with_context():
agent1 = Agent(
role="Researcher",
@@ -2602,7 +2538,6 @@ def test_replay_task_with_context():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_replay_with_context():
agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal")
task1 = Task(
@@ -2661,7 +2596,6 @@ def test_replay_with_context():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_replay_with_invalid_task_id():
agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal")
task1 = Task(
@@ -2722,7 +2656,6 @@ def test_replay_with_invalid_task_id():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
@patch.object(Crew, "_interpolate_inputs")
def test_replay_interpolates_inputs_properly(mock_interpolate_inputs):
agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal")
@@ -2784,7 +2717,6 @@ def test_replay_interpolates_inputs_properly(mock_interpolate_inputs):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_replay_setup_context():
agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal")
task1 = Task(description="Context Task", expected_output="Say {name}", agent=agent)
@@ -2847,7 +2779,6 @@ def test_replay_setup_context():
assert crew.tasks[1].prompt_context == "context raw output"
@pytest.mark.timeout(60)
def test_key():
tasks = [
Task(
@@ -2873,7 +2804,6 @@ def test_key():
assert crew.key == hash
@pytest.mark.timeout(60)
def test_key_with_interpolated_inputs():
researcher = Agent(
role="{topic} Researcher",
@@ -2918,7 +2848,6 @@ def test_key_with_interpolated_inputs():
assert crew.key == curr_key
@pytest.mark.timeout(60)
def test_conditional_task_requirement_breaks_when_singular_conditional_task():
def condition_fn(output) -> bool:
return output.raw.startswith("Andrew Ng has!!")
@@ -2937,7 +2866,6 @@ def test_conditional_task_requirement_breaks_when_singular_conditional_task():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_conditional_task_last_task_when_conditional_is_true():
def condition_fn(output) -> bool:
return True
@@ -2965,7 +2893,6 @@ def test_conditional_task_last_task_when_conditional_is_true():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_conditional_task_last_task_when_conditional_is_false():
def condition_fn(output) -> bool:
return False
@@ -2990,7 +2917,6 @@ def test_conditional_task_last_task_when_conditional_is_false():
assert result.raw == "Hi"
@pytest.mark.timeout(60)
def test_conditional_task_requirement_breaks_when_task_async():
def my_condition(context):
return context.get("some_value") > 10
@@ -3016,7 +2942,6 @@ def test_conditional_task_requirement_breaks_when_task_async():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_conditional_should_skip():
task1 = Task(description="Return hello", expected_output="say hi", agent=researcher)
@@ -3049,7 +2974,6 @@ def test_conditional_should_skip():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_conditional_should_execute():
task1 = Task(description="Return hello", expected_output="say hi", agent=researcher)
@@ -3083,7 +3007,6 @@ def test_conditional_should_execute():
@mock.patch("crewai.crew.CrewEvaluator")
@mock.patch("crewai.crew.Crew.copy")
@mock.patch("crewai.crew.Crew.kickoff")
@pytest.mark.timeout(60)
def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
@@ -3118,7 +3041,6 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_hierarchical_verbose_manager_agent():
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
@@ -3140,7 +3062,6 @@ def test_hierarchical_verbose_manager_agent():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_hierarchical_verbose_false_manager_agent():
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",

View File

@@ -6,6 +6,9 @@ from unittest.mock import patch
import pytest
# Attempt to import the module and skip the test if it's not available
docling = pytest.importorskip("docling")
from crewai.knowledge.source.crew_docling_source import CrewDoclingSource
from crewai.knowledge.source.csv_knowledge_source import CSVKnowledgeSource
from crewai.knowledge.source.excel_knowledge_source import ExcelKnowledgeSource
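A minimal sketch of the optional-dependency pattern added above, assuming only pytest is installed (the module and test names here are illustrative):

import pytest

# importorskip imports and returns the module, or skips every test in this
# file at collection time when the dependency is not installed.
docling = pytest.importorskip("docling")

def test_optional_dependency_available():
    assert docling.__name__ == "docling"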
@@ -376,7 +379,7 @@ def test_multiple_2k_character_files(mock_vector_db, tmpdir):
mock_vector_db.query.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr(record_mode="new_episodes", filter_headers=["authorization"])
def test_hybrid_string_and_files(mock_vector_db, tmpdir):
# Create string sources
string_contents = [
@@ -445,7 +448,7 @@ def test_pdf_knowledge_source(mock_vector_db):
mock_vector_db.query.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr(record_mode="new_episodes", filter_headers=["authorization"])
def test_csv_knowledge_source(mock_vector_db, tmpdir):
"""Test CSVKnowledgeSource with a simple CSV file."""
@@ -578,6 +581,7 @@ def test_multiple_docling_sources():
assert docling_source.content is not None
@pytest.mark.vcr(record_mode="new_episodes", filter_headers=["authorization"])
def test_docling_source_with_local_file():
current_dir = Path(__file__).parent
pdf_path = current_dir / "crewai_quickstart.pdf"
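For context on the decorator change repeated in this file: under VCR.py semantics (which the pytest vcr marker wraps), record_mode="new_episodes" replays interactions already stored in the cassette and appends any requests not yet present, rather than failing on a cassette miss. A hedged usage sketch, with an illustrative test body:

import pytest

# "new_episodes" plays back recorded requests and records unmatched ones,
# so the first run hits the network and later runs replay the cassette.
@pytest.mark.vcr(record_mode="new_episodes", filter_headers=["authorization"])
def test_fetches_remote_resource():
    ...  # perform the HTTP calls under test here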
@@ -606,6 +610,6 @@ def test_file_path_validation():
# Test neither file_path nor file_paths provided
with pytest.raises(
ValueError,
match="file_path/file_paths must be a Path, str, or a list of these types"
match="file_path/file_paths must be a Path, str, or a list of these types",
):
PDFKnowledgeSource()

View File

@@ -216,7 +216,6 @@ def test_multiple_output_type_error():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_pydantic_sequential():
class ScoreOutput(BaseModel):
score: int
@@ -242,7 +241,6 @@ def test_output_pydantic_sequential():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_pydantic_hierarchical():
class ScoreOutput(BaseModel):
score: int
@@ -273,7 +271,6 @@ def test_output_pydantic_hierarchical():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_sequential():
class ScoreOutput(BaseModel):
score: int
@@ -300,7 +297,6 @@ def test_output_json_sequential():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_hierarchical():
class ScoreOutput(BaseModel):
score: int
@@ -331,7 +327,6 @@ def test_output_json_hierarchical():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_json_property_without_output_json():
class ScoreOutput(BaseModel):
score: int
@@ -360,7 +355,6 @@ def test_json_property_without_output_json():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_dict_sequential():
class ScoreOutput(BaseModel):
score: int
@@ -386,7 +380,6 @@ def test_output_json_dict_sequential():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_dict_hierarchical():
class ScoreOutput(BaseModel):
score: int
@@ -417,7 +410,6 @@ def test_output_json_dict_hierarchical():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_pydantic_to_another_task():
class ScoreOutput(BaseModel):
score: int
@@ -456,7 +448,6 @@ def test_output_pydantic_to_another_task():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_to_another_task():
class ScoreOutput(BaseModel):
score: int
@@ -488,7 +479,6 @@ def test_output_json_to_another_task():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_save_task_output():
scorer = Agent(
role="Scorer",
@@ -513,7 +503,6 @@ def test_save_task_output():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_save_task_json_output():
class ScoreOutput(BaseModel):
score: int
@@ -544,7 +533,6 @@ def test_save_task_json_output():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_save_task_pydantic_output():
class ScoreOutput(BaseModel):
score: int
@@ -575,7 +563,6 @@ def test_save_task_pydantic_output():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_custom_converter_cls():
class ScoreOutput(BaseModel):
score: int
@@ -608,7 +595,6 @@ def test_custom_converter_cls():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_increment_delegations_for_hierarchical_process():
scorer = Agent(
role="Scorer",
@@ -636,7 +622,6 @@ def test_increment_delegations_for_hierarchical_process():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_increment_delegations_for_sequential_process():
manager = Agent(
role="Manager",
@@ -671,7 +656,6 @@ def test_increment_delegations_for_sequential_process():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_increment_tool_errors():
from crewai.tools import tool
@@ -705,7 +689,6 @@ def test_increment_tool_errors():
assert len(increment_tools_errors.mock_calls) > 0
@pytest.mark.timeout(60)
def test_task_definition_based_on_dict():
config = {
"description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.",
@@ -719,7 +702,6 @@ def test_task_definition_based_on_dict():
assert task.agent is None
@pytest.mark.timeout(60)
def test_conditional_task_definition_based_on_dict():
config = {
"description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.",
@@ -733,7 +715,6 @@ def test_conditional_task_definition_based_on_dict():
assert task.agent is None
@pytest.mark.timeout(60)
def test_interpolate_inputs():
task = Task(
description="Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting.",
@@ -758,7 +739,6 @@ def test_interpolate_inputs():
assert task.output_file == "/tmp/ML/output_2025.txt"
@pytest.mark.timeout(60)
def test_interpolate_only():
"""Test the interpolate_only method for various scenarios including JSON structure preservation."""
task = Task(
@@ -795,7 +775,6 @@ def test_interpolate_only():
assert result == no_placeholders
@pytest.mark.timeout(60)
def test_task_output_str_with_pydantic():
from crewai.tasks.output_format import OutputFormat
@@ -813,7 +792,6 @@ def test_task_output_str_with_pydantic():
assert str(task_output) == str(score_output)
@pytest.mark.timeout(60)
def test_task_output_str_with_json_dict():
from crewai.tasks.output_format import OutputFormat
@@ -828,7 +806,6 @@ def test_task_output_str_with_json_dict():
assert str(task_output) == str(json_dict)
@pytest.mark.timeout(60)
def test_task_output_str_with_raw():
from crewai.tasks.output_format import OutputFormat
@@ -843,7 +820,6 @@ def test_task_output_str_with_raw():
assert str(task_output) == raw_output
@pytest.mark.timeout(60)
def test_task_output_str_with_pydantic_and_json_dict():
from crewai.tasks.output_format import OutputFormat
@@ -864,7 +840,6 @@ def test_task_output_str_with_pydantic_and_json_dict():
assert str(task_output) == str(score_output)
@pytest.mark.timeout(60)
def test_task_output_str_with_none():
from crewai.tasks.output_format import OutputFormat
@@ -877,7 +852,6 @@ def test_task_output_str_with_none():
assert str(task_output) == ""
@pytest.mark.timeout(60)
def test_key():
original_description = "Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting."
original_expected_output = "Bullet point list of 5 interesting ideas about {topic}."
@@ -897,7 +871,6 @@ def test_key():
), "The key should be the hash of the non-interpolated description."
@pytest.mark.timeout(60)
def test_output_file_validation():
"""Test output file path validation."""
# Valid paths

View File

@@ -14,7 +14,6 @@ class TestAgentTool(BaseAgentTool):
return "Test response"
@pytest.mark.timeout(60)
@pytest.mark.parametrize(
"role_name,should_match",
[

View File

@@ -8,7 +8,6 @@ from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
@pytest.mark.timeout(60)
def test_task_without_guardrail():
"""Test that tasks work normally without guardrails (backward compatibility)."""
agent = Mock()
@@ -23,7 +22,6 @@ def test_task_without_guardrail():
assert result.raw == "test result"
@pytest.mark.timeout(60)
def test_task_with_successful_guardrail():
"""Test that successful guardrail validation passes transformed result."""
@@ -42,7 +40,6 @@ def test_task_with_successful_guardrail():
assert result.raw == "TEST RESULT"
@pytest.mark.timeout(60)
def test_task_with_failing_guardrail():
"""Test that failing guardrail triggers retry with error context."""
@@ -70,7 +67,6 @@ def test_task_with_failing_guardrail():
assert task.retry_count == 1
@pytest.mark.timeout(60)
def test_task_with_guardrail_retries():
"""Test that guardrail respects max_retries configuration."""
@@ -97,7 +93,6 @@ def test_task_with_guardrail_retries():
assert "Invalid format" in str(exc_info.value)
@pytest.mark.timeout(60)
def test_guardrail_error_in_context():
"""Test that guardrail error is passed in context for retry."""

uv.lock (generated)
View File

@@ -594,6 +594,7 @@ dependencies = [
{ name = "blinker" },
{ name = "chromadb" },
{ name = "click" },
{ name = "crewai-tools" },
{ name = "instructor" },
{ name = "json-repair" },
{ name = "jsonref" },
@@ -645,7 +646,6 @@ tools = [
[package.dev-dependencies]
dev = [
{ name = "cairosvg" },
{ name = "crewai-tools" },
{ name = "mkdocs" },
{ name = "mkdocs-material" },
{ name = "mkdocs-material-extensions" },
@@ -670,6 +670,7 @@ requires-dist = [
{ name = "blinker", specifier = ">=1.9.0" },
{ name = "chromadb", specifier = ">=0.5.23" },
{ name = "click", specifier = ">=8.1.7" },
{ name = "crewai-tools", specifier = ">=0.17.0" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = ">=0.17.0" },
{ name = "docling", marker = "extra == 'docling'", specifier = ">=2.12.0" },
{ name = "fastembed", marker = "extra == 'fastembed'", specifier = ">=0.4.1" },
@@ -700,7 +701,6 @@ requires-dist = [
[package.metadata.requires-dev]
dev = [
{ name = "cairosvg", specifier = ">=2.7.1" },
{ name = "crewai-tools", specifier = ">=0.17.0" },
{ name = "mkdocs", specifier = ">=1.4.3" },
{ name = "mkdocs-material", specifier = ">=9.5.7" },
{ name = "mkdocs-material-extensions", specifier = ">=1.3.1" },