tests should work now

Brandon Hancock
2025-01-02 14:18:11 -05:00
parent 11ff8270ea
commit db61f5dd59
13 changed files with 10 additions and 264 deletions
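Most of the hunks below touch the same pattern: a VCR-recorded test with a 60-second timeout that checks a small ScoreOutput Pydantic model produced by a crew. For reference, a minimal sketch of that pattern (the goal/backstory strings, the expected_output text, and the final assertion are assumptions for illustration, not taken from this diff):

from pydantic import BaseModel
import pytest

from crewai import Agent, Crew, Process, Task


@pytest.mark.vcr(filter_headers=["authorization"])  # replay recorded HTTP; keep the auth header out of cassettes
@pytest.mark.timeout(60)  # abort the test if it runs longer than 60 seconds
def test_output_pydantic_sequential():
    class ScoreOutput(BaseModel):
        score: int

    scorer = Agent(
        role="Scorer",
        goal="Score titles on a 1-5 scale",  # assumed wording
        backstory="You are an expert scorer.",  # assumed wording
    )
    task = Task(
        description="Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work'",
        expected_output="The score of the title.",  # assumed wording
        output_pydantic=ScoreOutput,
        agent=scorer,
    )
    crew = Crew(agents=[scorer], tasks=[task], process=Process.sequential)
    result = crew.kickoff()
    assert isinstance(result.pydantic, ScoreOutput)  # assumed shape of the final check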


@@ -216,7 +216,6 @@ def test_multiple_output_type_error():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_pydantic_sequential():
    class ScoreOutput(BaseModel):
        score: int
@@ -242,7 +241,6 @@ def test_output_pydantic_sequential():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_pydantic_hierarchical():
    class ScoreOutput(BaseModel):
        score: int
@@ -273,7 +271,6 @@ def test_output_pydantic_hierarchical():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_sequential():
    class ScoreOutput(BaseModel):
        score: int
@@ -300,7 +297,6 @@ def test_output_json_sequential():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_hierarchical():
    class ScoreOutput(BaseModel):
        score: int
@@ -331,7 +327,6 @@ def test_output_json_hierarchical():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_json_property_without_output_json():
    class ScoreOutput(BaseModel):
        score: int
@@ -360,7 +355,6 @@ def test_json_property_without_output_json():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_dict_sequential():
    class ScoreOutput(BaseModel):
        score: int
@@ -386,7 +380,6 @@ def test_output_json_dict_sequential():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_dict_hierarchical():
    class ScoreOutput(BaseModel):
        score: int
@@ -417,7 +410,6 @@ def test_output_json_dict_hierarchical():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_pydantic_to_another_task():
    class ScoreOutput(BaseModel):
        score: int
@@ -456,7 +448,6 @@ def test_output_pydantic_to_another_task():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_output_json_to_another_task():
    class ScoreOutput(BaseModel):
        score: int
@@ -488,7 +479,6 @@ def test_output_json_to_another_task():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_save_task_output():
    scorer = Agent(
        role="Scorer",
@@ -513,7 +503,6 @@ def test_save_task_output():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_save_task_json_output():
    class ScoreOutput(BaseModel):
        score: int
@@ -544,7 +533,6 @@ def test_save_task_json_output():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_save_task_pydantic_output():
    class ScoreOutput(BaseModel):
        score: int
@@ -575,7 +563,6 @@ def test_save_task_pydantic_output():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_custom_converter_cls():
    class ScoreOutput(BaseModel):
        score: int
@@ -608,7 +595,6 @@ def test_custom_converter_cls():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_increment_delegations_for_hierarchical_process():
    scorer = Agent(
        role="Scorer",
@@ -636,7 +622,6 @@ def test_increment_delegations_for_hierarchical_process():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_increment_delegations_for_sequential_process():
    manager = Agent(
        role="Manager",
@@ -671,7 +656,6 @@ def test_increment_delegations_for_sequential_process():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(60)
def test_increment_tool_errors():
    from crewai.tools import tool
@@ -705,7 +689,6 @@ def test_increment_tool_errors():
        assert len(increment_tools_errors.mock_calls) > 0

@pytest.mark.timeout(60)
def test_task_definition_based_on_dict():
    config = {
        "description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.",
@@ -719,7 +702,6 @@ def test_task_definition_based_on_dict():
    assert task.agent is None

@pytest.mark.timeout(60)
def test_conditional_task_definition_based_on_dict():
    config = {
        "description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.",
@@ -733,7 +715,6 @@ def test_conditional_task_definition_based_on_dict():
    assert task.agent is None

@pytest.mark.timeout(60)
def test_interpolate_inputs():
    task = Task(
        description="Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting.",
@@ -758,7 +739,6 @@ def test_interpolate_inputs():
    assert task.output_file == "/tmp/ML/output_2025.txt"

@pytest.mark.timeout(60)
def test_interpolate_only():
"""Test the interpolate_only method for various scenarios including JSON structure preservation."""
task = Task(
@@ -795,7 +775,6 @@ def test_interpolate_only():
    assert result == no_placeholders

@pytest.mark.timeout(60)
def test_task_output_str_with_pydantic():
    from crewai.tasks.output_format import OutputFormat
@@ -813,7 +792,6 @@ def test_task_output_str_with_pydantic():
    assert str(task_output) == str(score_output)

@pytest.mark.timeout(60)
def test_task_output_str_with_json_dict():
    from crewai.tasks.output_format import OutputFormat
@@ -828,7 +806,6 @@ def test_task_output_str_with_json_dict():
    assert str(task_output) == str(json_dict)

@pytest.mark.timeout(60)
def test_task_output_str_with_raw():
    from crewai.tasks.output_format import OutputFormat
@@ -843,7 +820,6 @@ def test_task_output_str_with_raw():
    assert str(task_output) == raw_output

@pytest.mark.timeout(60)
def test_task_output_str_with_pydantic_and_json_dict():
    from crewai.tasks.output_format import OutputFormat
@@ -864,7 +840,6 @@ def test_task_output_str_with_pydantic_and_json_dict():
    assert str(task_output) == str(score_output)

@pytest.mark.timeout(60)
def test_task_output_str_with_none():
    from crewai.tasks.output_format import OutputFormat
@@ -877,7 +852,6 @@ def test_task_output_str_with_none():
    assert str(task_output) == ""

@pytest.mark.timeout(60)
def test_key():
    original_description = "Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting."
    original_expected_output = "Bullet point list of 5 interesting ideas about {topic}."
@@ -897,7 +871,6 @@ def test_key():
), "The key should be the hash of the non-interpolated description."
@pytest.mark.timeout(60)
def test_output_file_validation():
"""Test output file path validation."""
# Valid paths
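A side note on the @pytest.mark.vcr(filter_headers=["authorization"]) marker that appears as context throughout these hunks: with pytest-recording, the same filtering can also be centralized in a vcr_config fixture instead of being repeated on every test. A sketch of that alternative (illustrative only; this conftest is not part of the commit):

# conftest.py (illustrative; not part of this commit)
import pytest


@pytest.fixture(scope="module")
def vcr_config():
    # pytest-recording forwards this dict to vcrpy, so the Authorization
    # header never ends up in the recorded cassettes.
    return {"filter_headers": ["authorization"]}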