Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-28 09:38:17 +00:00
Apply automatic linting fixes to tests directory
Co-Authored-By: Joe Moura <joao@crewai.com>
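The hunks below all apply the same handful of automatic fixes: explicit "-> None" return annotations on test functions, plain asserts instead of unittest self.assert* calls, builtin generics (list[int], dict[str, int], str | None) instead of typing.List/Dict/Optional, exception messages bound to a variable before raising, and trailing commas in multi-line calls. As a minimal illustrative sketch only (the helper and test below are hypothetical, not taken from this commit), the post-lint style looks like this:

def sanitize(names: list[str], default: str | None = None) -> list[str]:
    # Hypothetical helper, included only to make the example self-contained.
    if not names:
        msg = "names must not be empty"
        raise ValueError(msg)
    return [n.strip() or (default or "unnamed") for n in names]


def test_sanitize_strips_whitespace() -> None:
    assert sanitize(
        ["  agent  ", ""],
        default="unnamed",
    ) == ["agent", "unnamed"]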
@@ -25,15 +25,15 @@ class InternalCrewEvaluator:
return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
def test_setup_for_evaluating(self, crew_planner):
def test_setup_for_evaluating(self, crew_planner) -> None:
crew_planner._setup_for_evaluating()
assert crew_planner.crew.tasks[0].callback == crew_planner.evaluate
def test_set_iteration(self, crew_planner):
def test_set_iteration(self, crew_planner) -> None:
crew_planner.set_iteration(1)
assert crew_planner.iteration == 1
def test_evaluator_agent(self, crew_planner):
def test_evaluator_agent(self, crew_planner) -> None:
agent = crew_planner._evaluator_agent()
assert agent.role == "Task Execution Evaluator"
assert (

@@ -47,7 +47,7 @@ class InternalCrewEvaluator:
assert agent.verbose is False
assert agent.llm.model == "gpt-4o-mini"
def test_evaluation_task(self, crew_planner):
def test_evaluation_task(self, crew_planner) -> None:
evaluator_agent = Agent(
role="Evaluator Agent",
goal="Evaluate the performance of the agents in the crew",

@@ -60,11 +60,11 @@ class InternalCrewEvaluator:
)
task_output = "Task Output 1"
task = crew_planner._evaluation_task(
evaluator_agent, task_to_evaluate, task_output
evaluator_agent, task_to_evaluate, task_output,
)
assert task.description.startswith(
"Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance."
"Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.",
)
assert task.agent == evaluator_agent

@@ -79,7 +79,7 @@ class InternalCrewEvaluator:
@mock.patch("crewai.utilities.evaluators.crew_evaluator_handler.Console")
@mock.patch("crewai.utilities.evaluators.crew_evaluator_handler.Table")
def test_print_crew_evaluation_result(self, table, console, crew_planner):
def test_print_crew_evaluation_result(self, table, console, crew_planner) -> None:
# Set up task scores and execution times
crew_planner.tasks_scores = {
1: [10, 9, 8],

@@ -97,10 +97,10 @@ class InternalCrewEvaluator:
]
crew_planner.crew.tasks = [
mock.Mock(
agent=crew_planner.crew.agents[0], processed_by_agents=["Agent 1"]
agent=crew_planner.crew.agents[0], processed_by_agents=["Agent 1"],
),
mock.Mock(
agent=crew_planner.crew.agents[1], processed_by_agents=["Agent 2"]
agent=crew_planner.crew.agents[1], processed_by_agents=["Agent 2"],
),
]

@@ -111,7 +111,7 @@ class InternalCrewEvaluator:
table.assert_has_calls(
[
mock.call(
title="Tasks Scores \n (1-10 Higher is better)", box=mock.ANY
title="Tasks Scores \n (1-10 Higher is better)", box=mock.ANY,
), # Title and styling
mock.call().add_column("Tasks/Crew/Agents", style="cyan"), # Columns
mock.call().add_column("Run 1", justify="center"),

@@ -125,15 +125,15 @@ class InternalCrewEvaluator:
# Add crew averages and execution times
mock.call().add_row("Crew", "9.00", "8.00", "8.5", ""),
mock.call().add_row("Execution Time (s)", "135", "155", "145", ""),
]
],
)
# Ensure the console prints the table
console.assert_has_calls([mock.call(), mock.call().print(table())])
def test_evaluate(self, crew_planner):
def test_evaluate(self, crew_planner) -> None:
task_output = TaskOutput(
description="Task 1", agent=str(crew_planner.crew.agents[0])
description="Task 1", agent=str(crew_planner.crew.agents[0]),
)
with mock.patch.object(Task, "execute_sync") as execute:
@@ -8,7 +8,7 @@ from crewai.utilities.evaluators.task_evaluator import (
@patch("crewai.utilities.evaluators.task_evaluator.Converter")
def test_evaluate_training_data(converter_mock):
def test_evaluate_training_data(converter_mock) -> None:
training_data = {
"agent_id": {
"data1": {

@@ -21,7 +21,7 @@ def test_evaluate_training_data(converter_mock):
"human_feedback": "Human feedback 2",
"improved_output": "Improved output 2",
},
}
},
}
agent_id = "agent_id"
original_agent = MagicMock()

@@ -30,7 +30,7 @@ def test_evaluate_training_data(converter_mock):
suggestions=[
"The initial output was already good, having a detailed explanation. However, the improved output "
"gave similar information but in a more professional manner using better vocabulary. For future tasks, "
"try to implement more elaborate language and precise terminology from the beginning."
"try to implement more elaborate language and precise terminology from the beginning.",
],
quality=8.0,
final_summary="The agent responded well initially. However, the improved output showed that there is room "

@@ -39,7 +39,7 @@ def test_evaluate_training_data(converter_mock):
)
converter_mock.return_value.to_pydantic.return_value = function_return_value
result = TaskEvaluator(original_agent=original_agent).evaluate_training_data(
training_data, agent_id
training_data, agent_id,
)
assert result == function_return_value

@@ -61,5 +61,5 @@ def test_evaluate_training_data(converter_mock):
"following structure, with the following keys:\n{\n suggestions: List[str],\n quality: float,\n final_summary: str\n}",
),
mock.call().to_pydantic(),
]
],
)
@@ -1,3 +1,4 @@
from typing import NoReturn
from unittest.mock import Mock
from crewai.utilities.events.base_events import BaseEvent

@@ -8,11 +9,11 @@ class TestEvent(BaseEvent):
pass
def test_specific_event_handler():
def test_specific_event_handler() -> None:
mock_handler = Mock()
@crewai_event_bus.on(TestEvent)
def handler(source, event):
def handler(source, event) -> None:
mock_handler(source, event)
event = TestEvent(type="test_event")

@@ -21,11 +22,11 @@ def test_specific_event_handler():
mock_handler.assert_called_once_with("source_object", event)
def test_wildcard_event_handler():
def test_wildcard_event_handler() -> None:
mock_handler = Mock()
@crewai_event_bus.on(BaseEvent)
def handler(source, event):
def handler(source, event) -> None:
mock_handler(source, event)
event = TestEvent(type="test_event")

@@ -34,10 +35,11 @@ def test_wildcard_event_handler():
mock_handler.assert_called_once_with("source_object", event)
def test_event_bus_error_handling(capfd):
def test_event_bus_error_handling(capfd) -> None:
@crewai_event_bus.on(BaseEvent)
def broken_handler(source, event):
raise ValueError("Simulated handler failure")
def broken_handler(source, event) -> NoReturn:
msg = "Simulated handler failure"
raise ValueError(msg)
event = TestEvent(type="test_event")
crewai_event_bus.emit("source_object", event)
@@ -1,7 +1,4 @@
import unittest
from typing import Any, Dict, List, Union
import pytest
from crewai.utilities.chromadb import (
MAX_COLLECTION_LENGTH,

@@ -12,58 +9,58 @@ from crewai.utilities.chromadb import (
class TestChromadbUtils(unittest.TestCase):
def test_sanitize_collection_name_long_name(self):
def test_sanitize_collection_name_long_name(self) -> None:
"""Test sanitizing a very long collection name."""
long_name = "This is an extremely long role name that will definitely exceed the ChromaDB collection name limit of 63 characters and cause an error when used as a collection name"
sanitized = sanitize_collection_name(long_name)
self.assertLessEqual(len(sanitized), MAX_COLLECTION_LENGTH)
self.assertTrue(sanitized[0].isalnum())
self.assertTrue(sanitized[-1].isalnum())
self.assertTrue(all(c.isalnum() or c in ["_", "-"] for c in sanitized))
assert len(sanitized) <= MAX_COLLECTION_LENGTH
assert sanitized[0].isalnum()
assert sanitized[-1].isalnum()
assert all(c.isalnum() or c in ["_", "-"] for c in sanitized)
def test_sanitize_collection_name_special_chars(self):
def test_sanitize_collection_name_special_chars(self) -> None:
"""Test sanitizing a name with special characters."""
special_chars = "Agent@123!#$%^&*()"
sanitized = sanitize_collection_name(special_chars)
self.assertTrue(sanitized[0].isalnum())
self.assertTrue(sanitized[-1].isalnum())
self.assertTrue(all(c.isalnum() or c in ["_", "-"] for c in sanitized))
assert sanitized[0].isalnum()
assert sanitized[-1].isalnum()
assert all(c.isalnum() or c in ["_", "-"] for c in sanitized)
def test_sanitize_collection_name_short_name(self):
def test_sanitize_collection_name_short_name(self) -> None:
"""Test sanitizing a very short name."""
short_name = "A"
sanitized = sanitize_collection_name(short_name)
self.assertGreaterEqual(len(sanitized), MIN_COLLECTION_LENGTH)
self.assertTrue(sanitized[0].isalnum())
self.assertTrue(sanitized[-1].isalnum())
assert len(sanitized) >= MIN_COLLECTION_LENGTH
assert sanitized[0].isalnum()
assert sanitized[-1].isalnum()
def test_sanitize_collection_name_bad_ends(self):
def test_sanitize_collection_name_bad_ends(self) -> None:
"""Test sanitizing a name with non-alphanumeric start/end."""
bad_ends = "_Agent_"
sanitized = sanitize_collection_name(bad_ends)
self.assertTrue(sanitized[0].isalnum())
self.assertTrue(sanitized[-1].isalnum())
assert sanitized[0].isalnum()
assert sanitized[-1].isalnum()
def test_sanitize_collection_name_none(self):
def test_sanitize_collection_name_none(self) -> None:
"""Test sanitizing a None value."""
sanitized = sanitize_collection_name(None)
self.assertEqual(sanitized, "default_collection")
assert sanitized == "default_collection"
def test_sanitize_collection_name_ipv4_pattern(self):
def test_sanitize_collection_name_ipv4_pattern(self) -> None:
"""Test sanitizing an IPv4 address."""
ipv4 = "192.168.1.1"
sanitized = sanitize_collection_name(ipv4)
self.assertTrue(sanitized.startswith("ip_"))
self.assertTrue(sanitized[0].isalnum())
self.assertTrue(sanitized[-1].isalnum())
self.assertTrue(all(c.isalnum() or c in ["_", "-"] for c in sanitized))
assert sanitized.startswith("ip_")
assert sanitized[0].isalnum()
assert sanitized[-1].isalnum()
assert all(c.isalnum() or c in ["_", "-"] for c in sanitized)
def test_is_ipv4_pattern(self):
def test_is_ipv4_pattern(self) -> None:
"""Test IPv4 pattern detection."""
self.assertTrue(is_ipv4_pattern("192.168.1.1"))
self.assertFalse(is_ipv4_pattern("not.an.ip.address"))
assert is_ipv4_pattern("192.168.1.1")
assert not is_ipv4_pattern("not.an.ip.address")
def test_sanitize_collection_name_properties(self):
def test_sanitize_collection_name_properties(self) -> None:
"""Test that sanitized collection names always meet ChromaDB requirements."""
test_cases = [
"A" * 100, # Very long name

@@ -75,7 +72,7 @@ class TestChromadbUtils(unittest.TestCase):
]
for test_case in test_cases:
sanitized = sanitize_collection_name(test_case)
self.assertGreaterEqual(len(sanitized), MIN_COLLECTION_LENGTH)
self.assertLessEqual(len(sanitized), MAX_COLLECTION_LENGTH)
self.assertTrue(sanitized[0].isalnum())
self.assertTrue(sanitized[-1].isalnum())
assert len(sanitized) >= MIN_COLLECTION_LENGTH
assert len(sanitized) <= MAX_COLLECTION_LENGTH
assert sanitized[0].isalnum()
assert sanitized[-1].isalnum()
@@ -1,6 +1,4 @@
import json
import os
from typing import Dict, List, Optional
from unittest.mock import MagicMock, Mock, patch
import pytest

@@ -73,7 +71,7 @@ def mock_agent():
# Tests for convert_to_model
def test_convert_to_model_with_valid_json():
def test_convert_to_model_with_valid_json() -> None:
result = '{"name": "John", "age": 30}'
output = convert_to_model(result, SimpleModel, None, None)
assert isinstance(output, SimpleModel)

@@ -81,7 +79,7 @@ def test_convert_to_model_with_valid_json():
assert output.age == 30
def test_convert_to_model_with_invalid_json():
def test_convert_to_model_with_invalid_json() -> None:
result = '{"name": "John", "age": "thirty"}'
with patch("crewai.utilities.converter.handle_partial_json") as mock_handle:
mock_handle.return_value = "Fallback result"

@@ -89,13 +87,13 @@ def test_convert_to_model_with_invalid_json():
assert output == "Fallback result"
def test_convert_to_model_with_no_model():
def test_convert_to_model_with_no_model() -> None:
result = "Plain text"
output = convert_to_model(result, None, None, None)
assert output == "Plain text"
def test_convert_to_model_with_special_characters():
def test_convert_to_model_with_special_characters() -> None:
json_string_test = """
{
"responses": [

@@ -114,15 +112,15 @@ def test_convert_to_model_with_special_characters():
)
def test_convert_to_model_with_escaped_special_characters():
def test_convert_to_model_with_escaped_special_characters() -> None:
json_string_test = json.dumps(
{
"responses": [
{
"previous_message_content": "Hi Tom,\r\n\r\nNiamh has chosen the Mika phonics on"
}
]
}
"previous_message_content": "Hi Tom,\r\n\r\nNiamh has chosen the Mika phonics on",
},
],
},
)
output = convert_to_model(json_string_test, EmailResponses, None, None)
assert isinstance(output, EmailResponses)

@@ -133,7 +131,7 @@ def test_convert_to_model_with_escaped_special_characters():
)
def test_convert_to_model_with_multiple_special_characters():
def test_convert_to_model_with_multiple_special_characters() -> None:
json_string_test = """
{
"responses": [

@@ -153,7 +151,7 @@ def test_convert_to_model_with_multiple_special_characters():
# Tests for validate_model
def test_validate_model_pydantic_output():
def test_validate_model_pydantic_output() -> None:
result = '{"name": "Alice", "age": 25}'
output = validate_model(result, SimpleModel, False)
assert isinstance(output, SimpleModel)

@@ -161,7 +159,7 @@ def test_validate_model_pydantic_output():
assert output.age == 25
def test_validate_model_json_output():
def test_validate_model_json_output() -> None:
result = '{"name": "Bob", "age": 40}'
output = validate_model(result, SimpleModel, True)
assert isinstance(output, dict)

@@ -169,7 +167,7 @@ def test_validate_model_json_output():
# Tests for handle_partial_json
def test_handle_partial_json_with_valid_partial():
def test_handle_partial_json_with_valid_partial() -> None:
result = 'Some text {"name": "Charlie", "age": 35} more text'
output = handle_partial_json(result, SimpleModel, False, None)
assert isinstance(output, SimpleModel)

@@ -177,7 +175,7 @@ def test_handle_partial_json_with_valid_partial():
assert output.age == 35
def test_handle_partial_json_with_invalid_partial(mock_agent):
def test_handle_partial_json_with_invalid_partial(mock_agent) -> None:
result = "No valid JSON here"
with patch("crewai.utilities.converter.convert_with_instructions") as mock_convert:
mock_convert.return_value = "Converted result"

@@ -189,8 +187,8 @@ def test_handle_partial_json_with_invalid_partial(mock_agent):
@patch("crewai.utilities.converter.create_converter")
@patch("crewai.utilities.converter.get_conversion_instructions")
def test_convert_with_instructions_success(
mock_get_instructions, mock_create_converter, mock_agent
):
mock_get_instructions, mock_create_converter, mock_agent,
) -> None:
mock_get_instructions.return_value = "Instructions"
mock_converter = Mock()
mock_converter.to_pydantic.return_value = SimpleModel(name="David", age=50)

@@ -207,8 +205,8 @@ def test_convert_with_instructions_success(
@patch("crewai.utilities.converter.create_converter")
@patch("crewai.utilities.converter.get_conversion_instructions")
def test_convert_with_instructions_failure(
mock_get_instructions, mock_create_converter, mock_agent
):
mock_get_instructions, mock_create_converter, mock_agent,
) -> None:
mock_get_instructions.return_value = "Instructions"
mock_converter = Mock()
mock_converter.to_pydantic.return_value = ConverterError("Conversion failed")

@@ -222,7 +220,7 @@ def test_convert_with_instructions_failure(
# Tests for get_conversion_instructions
def test_get_conversion_instructions_gpt():
def test_get_conversion_instructions_gpt() -> None:
llm = LLM(model="gpt-4o-mini")
with patch.object(LLM, "supports_function_calling") as supports_function_calling:
supports_function_calling.return_value = True

@@ -237,7 +235,7 @@ def test_get_conversion_instructions_gpt():
assert instructions == expected_instructions
def test_get_conversion_instructions_non_gpt():
def test_get_conversion_instructions_non_gpt() -> None:
llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")
with patch.object(LLM, "supports_function_calling", return_value=False):
instructions = get_conversion_instructions(SimpleModel, llm)

@@ -246,17 +244,17 @@ def test_get_conversion_instructions_non_gpt():
# Tests for is_gpt
def test_supports_function_calling_true():
def test_supports_function_calling_true() -> None:
llm = LLM(model="gpt-4o")
assert llm.supports_function_calling() is True
def test_supports_function_calling_false():
def test_supports_function_calling_false() -> None:
llm = LLM(model="non-existent-model")
assert llm.supports_function_calling() is False
def test_create_converter_with_mock_agent():
def test_create_converter_with_mock_agent() -> None:
mock_agent = MagicMock()
mock_agent.get_output_converter.return_value = MagicMock(spec=Converter)

@@ -272,7 +270,7 @@ def test_create_converter_with_mock_agent():
mock_agent.get_output_converter.assert_called_once()
def test_create_converter_with_custom_converter():
def test_create_converter_with_custom_converter() -> None:
converter = create_converter(
converter_cls=CustomConverter,
llm=LLM(model="gpt-4o-mini"),

@@ -284,22 +282,22 @@ def test_create_converter_with_custom_converter():
assert isinstance(converter, CustomConverter)
def test_create_converter_fails_without_agent_or_converter_cls():
def test_create_converter_fails_without_agent_or_converter_cls() -> None:
with pytest.raises(
ValueError, match="Either agent or converter_cls must be provided"
ValueError, match="Either agent or converter_cls must be provided",
):
create_converter(
llm=Mock(), text="Sample", model=SimpleModel, instructions="Convert"
llm=Mock(), text="Sample", model=SimpleModel, instructions="Convert",
)
def test_generate_model_description_simple_model():
def test_generate_model_description_simple_model() -> None:
description = generate_model_description(SimpleModel)
expected_description = '{\n "name": str,\n "age": int\n}'
assert description == expected_description
def test_generate_model_description_nested_model():
def test_generate_model_description_nested_model() -> None:
description = generate_model_description(NestedModel)
expected_description = (
'{\n "id": int,\n "data": {\n "name": str,\n "age": int\n}\n}'

@@ -307,9 +305,9 @@ def test_generate_model_description_nested_model():
assert description == expected_description
def test_generate_model_description_optional_field():
def test_generate_model_description_optional_field() -> None:
class ModelWithOptionalField(BaseModel):
name: Optional[str]
name: str | None
age: int
description = generate_model_description(ModelWithOptionalField)

@@ -317,18 +315,18 @@ def test_generate_model_description_optional_field():
assert description == expected_description
def test_generate_model_description_list_field():
def test_generate_model_description_list_field() -> None:
class ModelWithListField(BaseModel):
items: List[int]
items: list[int]
description = generate_model_description(ModelWithListField)
expected_description = '{\n "items": List[int]\n}'
assert description == expected_description
def test_generate_model_description_dict_field():
def test_generate_model_description_dict_field() -> None:
class ModelWithDictField(BaseModel):
attributes: Dict[str, int]
attributes: dict[str, int]
description = generate_model_description(ModelWithDictField)
expected_description = '{\n "attributes": Dict[str, int]\n}'

@@ -336,7 +334,7 @@ def test_generate_model_description_dict_field():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_convert_with_instructions():
def test_convert_with_instructions() -> None:
llm = LLM(model="gpt-4o-mini")
sample_text = "Name: Alice, Age: 30"

@@ -358,7 +356,7 @@ def test_convert_with_instructions():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_converter_with_llama3_2_model():
def test_converter_with_llama3_2_model() -> None:
llm = LLM(model="ollama/llama3.2:3b", base_url="http://localhost:11434")
sample_text = "Name: Alice Llama, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)

@@ -375,7 +373,7 @@ def test_converter_with_llama3_2_model():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_converter_with_llama3_1_model():
def test_converter_with_llama3_1_model() -> None:
llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")
sample_text = "Name: Alice Llama, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)

@@ -392,7 +390,7 @@ def test_converter_with_llama3_1_model():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_converter_with_nested_model():
def test_converter_with_nested_model() -> None:
llm = LLM(model="gpt-4o-mini")
sample_text = "Name: John Doe\nAge: 30\nAddress: 123 Main St, Anytown, 12345"

@@ -416,7 +414,7 @@ def test_converter_with_nested_model():
# Tests for error handling
def test_converter_error_handling():
def test_converter_error_handling() -> None:
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False
llm.call.return_value = "Invalid JSON"

@@ -431,13 +429,13 @@ def test_converter_error_handling():
)
with pytest.raises(ConverterError) as exc_info:
output = converter.to_pydantic()
converter.to_pydantic()
assert "Failed to convert text into a Pydantic model" in str(exc_info.value)
# Tests for retry logic
def test_converter_retry_logic():
def test_converter_retry_logic() -> None:
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False
llm.call.side_effect = [

@@ -465,10 +463,10 @@ def test_converter_retry_logic():
# Tests for optional fields
def test_converter_with_optional_fields():
def test_converter_with_optional_fields() -> None:
class OptionalModel(BaseModel):
name: str
age: Optional[int]
age: int | None
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False

@@ -492,9 +490,9 @@ def test_converter_with_optional_fields():
# Tests for list fields
def test_converter_with_list_field():
def test_converter_with_list_field() -> None:
class ListModel(BaseModel):
items: List[int]
items: list[int]
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False

@@ -519,7 +517,7 @@ def test_converter_with_list_field():
from enum import Enum
def test_converter_with_enum():
def test_converter_with_enum() -> None:
class Color(Enum):
RED = "red"
GREEN = "green"

@@ -550,7 +548,7 @@ def test_converter_with_enum():
# Tests for ambiguous input
def test_converter_with_ambiguous_input():
def test_converter_with_ambiguous_input() -> None:
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False
llm.call.return_value = '{"name": "Charlie", "age": "Not an age"}'

@@ -565,13 +563,13 @@ def test_converter_with_ambiguous_input():
)
with pytest.raises(ConverterError) as exc_info:
output = converter.to_pydantic()
converter.to_pydantic()
assert "failed to convert text into a pydantic model" in str(exc_info.value).lower()
# Tests for function calling support
def test_converter_with_function_calling():
def test_converter_with_function_calling() -> None:
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = True

@@ -594,7 +592,7 @@ def test_converter_with_function_calling():
instructor.to_pydantic.assert_called_once()
def test_generate_model_description_union_field():
def test_generate_model_description_union_field() -> None:
class UnionModel(BaseModel):
field: int | str | None
@@ -1,5 +1,5 @@
import os
from datetime import datetime
from typing import NoReturn
from unittest.mock import Mock, patch
import pytest

@@ -38,7 +38,6 @@ from crewai.utilities.events.llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMCallType,
LLMStreamChunkEvent,
)
from crewai.utilities.events.task_events import (

@@ -74,21 +73,21 @@ event_listener = EventListener()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_start_kickoff_event():
def test_crew_emits_start_kickoff_event() -> None:
received_events = []
mock_span = Mock()
@crewai_event_bus.on(CrewKickoffStartedEvent)
def handle_crew_start(source, event):
def handle_crew_start(source, event) -> None:
received_events.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
with (
patch.object(
event_listener._telemetry, "crew_execution_span", return_value=mock_span
event_listener._telemetry, "crew_execution_span", return_value=mock_span,
) as mock_crew_execution_span,
patch.object(
event_listener._telemetry, "end_crew", return_value=mock_span
event_listener._telemetry, "end_crew", return_value=mock_span,
) as mock_crew_ended,
):
crew.kickoff()

@@ -102,11 +101,11 @@ def test_crew_emits_start_kickoff_event():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_end_kickoff_event():
def test_crew_emits_end_kickoff_event() -> None:
received_events = []
@crewai_event_bus.on(CrewKickoffCompletedEvent)
def handle_crew_end(source, event):
def handle_crew_end(source, event) -> None:
received_events.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")

@@ -120,22 +119,22 @@ def test_crew_emits_end_kickoff_event():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_test_kickoff_type_event():
def test_crew_emits_test_kickoff_type_event() -> None:
received_events = []
mock_span = Mock()
@crewai_event_bus.on(CrewTestStartedEvent)
def handle_crew_end(source, event):
def handle_crew_end(source, event) -> None:
received_events.append(event)
@crewai_event_bus.on(CrewTestCompletedEvent)
def handle_crew_test_end(source, event):
def handle_crew_test_end(source, event) -> None:
received_events.append(event)
eval_llm = LLM(model="gpt-4o-mini")
with (
patch.object(
event_listener._telemetry, "test_execution_span", return_value=mock_span
event_listener._telemetry, "test_execution_span", return_value=mock_span,
) as mock_crew_execution_span,
):
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")

@@ -159,13 +158,13 @@ def test_crew_emits_test_kickoff_type_event():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_kickoff_failed_event():
def test_crew_emits_kickoff_failed_event() -> None:
received_events = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffFailedEvent)
def handle_crew_failed(source, event):
def handle_crew_failed(source, event) -> None:
received_events.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")

@@ -184,11 +183,11 @@ def test_crew_emits_kickoff_failed_event():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_start_task_event():
def test_crew_emits_start_task_event() -> None:
received_events = []
@crewai_event_bus.on(TaskStartedEvent)
def handle_task_start(source, event):
def handle_task_start(source, event) -> None:
received_events.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")

@@ -201,21 +200,21 @@ def test_crew_emits_start_task_event():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_end_task_event():
def test_crew_emits_end_task_event() -> None:
received_events = []
@crewai_event_bus.on(TaskCompletedEvent)
def handle_task_end(source, event):
def handle_task_end(source, event) -> None:
received_events.append(event)
mock_span = Mock()
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
with (
patch.object(
event_listener._telemetry, "task_started", return_value=mock_span
event_listener._telemetry, "task_started", return_value=mock_span,
) as mock_task_started,
patch.object(
event_listener._telemetry, "task_ended", return_value=mock_span
event_listener._telemetry, "task_ended", return_value=mock_span,
) as mock_task_ended,
):
crew.kickoff()

@@ -229,12 +228,12 @@ def test_crew_emits_end_task_event():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_emits_failed_event_on_execution_error():
def test_task_emits_failed_event_on_execution_error() -> None:
received_events = []
received_sources = []
@crewai_event_bus.on(TaskFailedEvent)
def handle_task_failed(source, event):
def handle_task_failed(source, event) -> None:
received_events.append(event)
received_sources.append(source)

@@ -266,15 +265,15 @@ def test_task_emits_failed_event_on_execution_error():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_emits_execution_started_and_completed_events():
def test_agent_emits_execution_started_and_completed_events() -> None:
received_events = []
@crewai_event_bus.on(AgentExecutionStartedEvent)
def handle_agent_start(source, event):
def handle_agent_start(source, event) -> None:
received_events.append(event)
@crewai_event_bus.on(AgentExecutionCompletedEvent)
def handle_agent_completed(source, event):
def handle_agent_completed(source, event) -> None:
received_events.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")

@@ -295,21 +294,21 @@ def test_agent_emits_execution_started_and_completed_events():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_emits_execution_error_event():
def test_agent_emits_execution_error_event() -> None:
received_events = []
@crewai_event_bus.on(AgentExecutionErrorEvent)
def handle_agent_start(source, event):
def handle_agent_start(source, event) -> None:
received_events.append(event)
error_message = "Error happening while sending prompt to model."
base_agent.max_retry_limit = 0
with patch.object(
CrewAgentExecutor, "invoke", wraps=base_agent.agent_executor.invoke
CrewAgentExecutor, "invoke", wraps=base_agent.agent_executor.invoke,
) as invoke_mock:
invoke_mock.side_effect = Exception(error_message)
with pytest.raises(Exception) as e:
with pytest.raises(Exception):
base_agent.execute_task(
task=base_task,
)

@@ -325,7 +324,7 @@ def test_agent_emits_execution_error_event():
class SayHiTool(BaseTool):
name: str = Field(default="say_hi", description="The name of the tool")
description: str = Field(
default="Say hi", description="The description of the tool"
default="Say hi", description="The description of the tool",
)
def _run(self) -> str:

@@ -333,11 +332,11 @@ class SayHiTool(BaseTool):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_emits_finished_events():
def test_tools_emits_finished_events() -> None:
received_events = []
@crewai_event_bus.on(ToolUsageFinishedEvent)
def handle_tool_end(source, event):
def handle_tool_end(source, event) -> None:
received_events.append(event)
agent = Agent(

@@ -364,16 +363,16 @@ def test_tools_emits_finished_events():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_emits_error_events():
def test_tools_emits_error_events() -> None:
received_events = []
@crewai_event_bus.on(ToolUsageErrorEvent)
def handle_tool_end(source, event):
def handle_tool_end(source, event) -> None:
received_events.append(event)
class ErrorTool(BaseTool):
name: str = Field(
default="error_tool", description="A tool that raises an error"
default="error_tool", description="A tool that raises an error",
)
description: str = Field(
default="This tool always raises an error",

@@ -381,7 +380,8 @@ def test_tools_emits_error_events():
)
def _run(self) -> str:
raise Exception("Simulated tool error")
msg = "Simulated tool error"
raise Exception(msg)
agent = Agent(
role="base_agent",

@@ -410,22 +410,22 @@ def test_tools_emits_error_events():
assert isinstance(received_events[0].timestamp, datetime)
def test_flow_emits_start_event():
def test_flow_emits_start_event() -> None:
received_events = []
mock_span = Mock()
@crewai_event_bus.on(FlowStartedEvent)
def handle_flow_start(source, event):
def handle_flow_start(source, event) -> None:
received_events.append(event)
class TestFlow(Flow[dict]):
@start()
def begin(self):
def begin(self) -> str:
return "started"
with (
patch.object(
event_listener._telemetry, "flow_execution_span", return_value=mock_span
event_listener._telemetry, "flow_execution_span", return_value=mock_span,
) as mock_flow_execution_span,
):
flow = TestFlow()

@@ -437,18 +437,18 @@ def test_flow_emits_start_event():
assert received_events[0].type == "flow_started"
def test_flow_emits_finish_event():
def test_flow_emits_finish_event() -> None:
received_events = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(FlowFinishedEvent)
def handle_flow_finish(source, event):
def handle_flow_finish(source, event) -> None:
received_events.append(event)
class TestFlow(Flow[dict]):
@start()
def begin(self):
def begin(self) -> str:
return "completed"
flow = TestFlow()

@@ -461,23 +461,22 @@ def test_flow_emits_finish_event():
assert result == "completed"
def test_flow_emits_method_execution_started_event():
def test_flow_emits_method_execution_started_event() -> None:
received_events = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MethodExecutionStartedEvent)
def handle_method_start(source, event):
print("event in method name", event.method_name)
def handle_method_start(source, event) -> None:
received_events.append(event)
class TestFlow(Flow[dict]):
@start()
def begin(self):
def begin(self) -> str:
return "started"
@listen("begin")
def second_method(self):
def second_method(self) -> str:
return "executed"
flow = TestFlow()

@@ -495,10 +494,10 @@ def test_flow_emits_method_execution_started_event():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_register_handler_adds_new_handler():
def test_register_handler_adds_new_handler() -> None:
received_events = []
def custom_handler(source, event):
def custom_handler(source, event) -> None:
received_events.append(event)
with crewai_event_bus.scoped_handlers():

@@ -513,14 +512,14 @@ def test_register_handler_adds_new_handler():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_multiple_handlers_for_same_event():
def test_multiple_handlers_for_same_event() -> None:
received_events_1 = []
received_events_2 = []
def handler_1(source, event):
def handler_1(source, event) -> None:
received_events_1.append(event)
def handler_2(source, event):
def handler_2(source, event) -> None:
received_events_2.append(event)
with crewai_event_bus.scoped_handlers():

@@ -536,22 +535,22 @@ def test_multiple_handlers_for_same_event():
assert received_events_2[0].type == "crew_kickoff_started"
def test_flow_emits_created_event():
def test_flow_emits_created_event() -> None:
received_events = []
mock_span = Mock()
@crewai_event_bus.on(FlowCreatedEvent)
def handle_flow_created(source, event):
def handle_flow_created(source, event) -> None:
received_events.append(event)
class TestFlow(Flow[dict]):
@start()
def begin(self):
def begin(self) -> str:
return "started"
with (
patch.object(
event_listener._telemetry, "flow_creation_span", return_value=mock_span
event_listener._telemetry, "flow_creation_span", return_value=mock_span,
) as mock_flow_creation_span,
):
flow = TestFlow()

@@ -564,17 +563,17 @@ def test_flow_emits_created_event():
assert received_events[0].type == "flow_created"
def test_flow_emits_method_execution_failed_event():
def test_flow_emits_method_execution_failed_event() -> None:
received_events = []
error = Exception("Simulated method failure")
@crewai_event_bus.on(MethodExecutionFailedEvent)
def handle_method_failed(source, event):
def handle_method_failed(source, event) -> None:
received_events.append(event)
class TestFlow(Flow[dict]):
@start()
def begin(self):
def begin(self) -> NoReturn:
raise error
flow = TestFlow()

@@ -589,15 +588,15 @@ def test_flow_emits_method_execution_failed_event():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_call_started_event():
def test_llm_emits_call_started_event() -> None:
received_events = []
@crewai_event_bus.on(LLMCallStartedEvent)
def handle_llm_call_started(source, event):
def handle_llm_call_started(source, event) -> None:
received_events.append(event)
@crewai_event_bus.on(LLMCallCompletedEvent)
def handle_llm_call_completed(source, event):
def handle_llm_call_completed(source, event) -> None:
received_events.append(event)
llm = LLM(model="gpt-4o-mini")

@@ -609,11 +608,11 @@ def test_llm_emits_call_started_event():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_call_failed_event():
def test_llm_emits_call_failed_event() -> None:
received_events = []
@crewai_event_bus.on(LLMCallFailedEvent)
def handle_llm_call_failed(source, event):
def handle_llm_call_failed(source, event) -> None:
received_events.append(event)
error_message = "Simulated LLM call failure"

@@ -629,14 +628,14 @@ def test_llm_emits_call_failed_event():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_stream_chunk_events():
def test_llm_emits_stream_chunk_events() -> None:
"""Test that LLM emits stream chunk events when streaming is enabled."""
received_chunks = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_stream_chunk(source, event):
def handle_stream_chunk(source, event) -> None:
received_chunks.append(event.chunk)
# Create an LLM with streaming enabled

@@ -653,14 +652,14 @@ def test_llm_emits_stream_chunk_events():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_no_stream_chunks_when_streaming_disabled():
def test_llm_no_stream_chunks_when_streaming_disabled() -> None:
"""Test that LLM doesn't emit stream chunk events when streaming is disabled."""
received_chunks = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_stream_chunk(source, event):
def handle_stream_chunk(source, event) -> None:
received_chunks.append(event.chunk)
# Create an LLM with streaming disabled

@@ -673,11 +672,12 @@ def test_llm_no_stream_chunks_when_streaming_disabled():
assert len(received_chunks) == 0
# Verify we got a response
assert response and isinstance(response, str)
assert response
assert isinstance(response, str)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_streaming_fallback_to_non_streaming():
def test_streaming_fallback_to_non_streaming() -> None:
"""Test that streaming falls back to non-streaming when there's an error."""
received_chunks = []
fallback_called = False

@@ -685,7 +685,7 @@ def test_streaming_fallback_to_non_streaming():
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_stream_chunk(source, event):
def handle_stream_chunk(source, event) -> None:
received_chunks.append(event.chunk)
# Create an LLM with streaming enabled

@@ -695,7 +695,7 @@ def test_streaming_fallback_to_non_streaming():
original_call = llm.call
# Create a mock call method that handles the streaming error
def mock_call(messages, tools=None, callbacks=None, available_functions=None):
def mock_call(messages, tools=None, callbacks=None, available_functions=None) -> str:
nonlocal fallback_called
# Emit a couple of chunks to simulate partial streaming
crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="Test chunk 1"))

@@ -731,14 +731,14 @@ def test_streaming_fallback_to_non_streaming():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_streaming_empty_response_handling():
def test_streaming_empty_response_handling() -> None:
"""Test that streaming handles empty responses correctly."""
received_chunks = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_stream_chunk(source, event):
def handle_stream_chunk(source, event) -> None:
received_chunks.append(event.chunk)
# Create an LLM with streaming enabled

@@ -748,7 +748,7 @@ def test_streaming_empty_response_handling():
original_call = llm.call
# Create a mock call method that simulates empty chunks
def mock_call(messages, tools=None, callbacks=None, available_functions=None):
def mock_call(messages, tools=None, callbacks=None, available_functions=None) -> str:
# Emit a few empty chunks
for _ in range(3):
crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk=""))

@@ -768,7 +768,8 @@ def test_streaming_empty_response_handling():
assert all(chunk == "" for chunk in received_chunks)
# Verify the response is the default message for empty responses
assert "I apologize" in response and "couldn't generate" in response
assert "I apologize" in response
assert "couldn't generate" in response
finally:
# Restore the original method
@@ -7,16 +7,16 @@ from crewai.utilities.file_handler import PickleHandler
class TestPickleHandler(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
self.file_name = "test_data.pkl"
self.file_path = os.path.join(os.getcwd(), self.file_name)
self.handler = PickleHandler(self.file_name)
def tearDown(self):
def tearDown(self) -> None:
if os.path.exists(self.file_path):
os.remove(self.file_path)
def test_initialize_file(self):
def test_initialize_file(self) -> None:
assert os.path.exists(self.file_path) is False
self.handler.initialize_file()

@@ -24,17 +24,17 @@ class TestPickleHandler(unittest.TestCase):
assert os.path.exists(self.file_path) is True
assert os.path.getsize(self.file_path) >= 0
def test_save_and_load(self):
def test_save_and_load(self) -> None:
data = {"key": "value"}
self.handler.save(data)
loaded_data = self.handler.load()
assert loaded_data == data
def test_load_empty_file(self):
def test_load_empty_file(self) -> None:
loaded_data = self.handler.load()
assert loaded_data == {}
def test_load_corrupted_file(self):
def test_load_corrupted_file(self) -> None:
with open(self.file_path, "wb") as file:
file.write(b"corrupted data")

@@ -42,4 +42,4 @@ class TestPickleHandler(unittest.TestCase):
self.handler.load()
assert str(exc.value) == "pickle data was truncated"
assert "<class '_pickle.UnpicklingError'>" == str(exc.type)
assert str(exc.type) == "<class '_pickle.UnpicklingError'>"
@@ -3,38 +3,38 @@ import pytest
from crewai.utilities.i18n import I18N
def test_load_prompts():
def test_load_prompts() -> None:
i18n = I18N()
i18n.load_prompts()
assert i18n._prompts is not None
def test_slice():
def test_slice() -> None:
i18n = I18N()
i18n.load_prompts()
assert isinstance(i18n.slice("role_playing"), str)
def test_tools():
def test_tools() -> None:
i18n = I18N()
i18n.load_prompts()
assert isinstance(i18n.tools("ask_question"), str)
def test_retrieve():
def test_retrieve() -> None:
i18n = I18N()
i18n.load_prompts()
assert isinstance(i18n.retrieve("slices", "role_playing"), str)
def test_retrieve_not_found():
def test_retrieve_not_found() -> None:
i18n = I18N()
i18n.load_prompts()
with pytest.raises(Exception):
i18n.retrieve("nonexistent_kind", "nonexistent_key")
def test_prompt_file():
def test_prompt_file() -> None:
import os
path = os.path.join(os.path.dirname(__file__), "prompts.json")
@@ -1,5 +1,4 @@
"""
Tests for verifying the integration of knowledge sources in the planning process.
"""Tests for verifying the integration of knowledge sources in the planning process.
This module ensures that agent knowledge is properly included during task planning.
"""

@@ -15,11 +14,12 @@ from crewai.utilities.planning_handler import CrewPlanner
@pytest.fixture
def mock_knowledge_source():
"""
Create a mock knowledge source with test content.
"""Create a mock knowledge source with test content.
Returns:
StringKnowledgeSource:
A knowledge source containing AI-related test content
A knowledge source containing AI-related test content.
"""
content = """
Important context about AI:

@@ -29,13 +29,13 @@ def mock_knowledge_source():
"""
return StringKnowledgeSource(content=content)
@patch('crewai.knowledge.storage.knowledge_storage.chromadb')
def test_knowledge_included_in_planning(mock_chroma):
@patch("crewai.knowledge.storage.knowledge_storage.chromadb")
def test_knowledge_included_in_planning(mock_chroma) -> None:
"""Test that verifies knowledge sources are properly included in planning."""
# Mock ChromaDB collection
mock_collection = mock_chroma.return_value.get_or_create_collection.return_value
mock_collection.add.return_value = None
# Create an agent with knowledge
agent = Agent(
role="AI Researcher",

@@ -43,16 +43,16 @@ def test_knowledge_included_in_planning(mock_chroma):
backstory="Expert in artificial intelligence",
knowledge_sources=[
StringKnowledgeSource(
content="AI systems require careful training and validation."
)
]
content="AI systems require careful training and validation.",
),
],
)
# Create a task for the agent
task = Task(
description="Explain the basics of AI systems",
expected_output="A clear explanation of AI fundamentals",
agent=agent
agent=agent,
)
# Create a crew planner
@@ -8,25 +8,25 @@ from crewai.llm import LLM
from crewai.utilities.llm_utils import create_llm
def test_create_llm_with_llm_instance():
def test_create_llm_with_llm_instance() -> None:
existing_llm = LLM(model="gpt-4o")
llm = create_llm(llm_value=existing_llm)
assert llm is existing_llm
def test_create_llm_with_valid_model_string():
def test_create_llm_with_valid_model_string() -> None:
llm = create_llm(llm_value="gpt-4o")
assert isinstance(llm, LLM)
assert llm.model == "gpt-4o"
def test_create_llm_with_invalid_model_string():
def test_create_llm_with_invalid_model_string() -> None:
with pytest.raises(BadRequestError, match="LLM Provider NOT provided"):
llm = create_llm(llm_value="invalid-model")
llm.call(messages=[{"role": "user", "content": "Hello, world!"}])
def test_create_llm_with_unknown_object_missing_attributes():
def test_create_llm_with_unknown_object_missing_attributes() -> None:
class UnknownObject:
pass

@@ -38,7 +38,7 @@ def test_create_llm_with_unknown_object_missing_attributes():
llm.call(messages=[{"role": "user", "content": "Hello, world!"}])
def test_create_llm_with_none_uses_default_model():
def test_create_llm_with_none_uses_default_model() -> None:
with patch.dict(os.environ, {}, clear=True):
with patch("crewai.cli.constants.DEFAULT_LLM_MODEL", "gpt-4o"):
llm = create_llm(llm_value=None)

@@ -46,7 +46,7 @@ def test_create_llm_with_none_uses_default_model():
assert llm.model == "gpt-4o-mini"
def test_create_llm_with_unknown_object():
def test_create_llm_with_unknown_object() -> None:
class UnknownObject:
model_name = "gpt-4o"
temperature = 0.7

@@ -60,7 +60,7 @@ def test_create_llm_with_unknown_object():
assert llm.max_tokens == 1500
def test_create_llm_from_env_with_unaccepted_attributes():
def test_create_llm_from_env_with_unaccepted_attributes() -> None:
with patch.dict(
os.environ,
{

@@ -78,7 +78,7 @@ def test_create_llm_from_env_with_unaccepted_attributes():
assert not hasattr(llm, "AWS_REGION_NAME")
def test_create_llm_with_partial_attributes():
def test_create_llm_with_partial_attributes() -> None:
class PartialAttributes:
model_name = "gpt-4o"
# temperature is missing

@@ -90,7 +90,7 @@ def test_create_llm_with_partial_attributes():
assert llm.temperature is None # Should handle missing attributes gracefully
def test_create_llm_with_invalid_type():
def test_create_llm_with_invalid_type() -> None:
with pytest.raises(BadRequestError, match="LLM Provider NOT provided"):
llm = create_llm(llm_value=42)
llm.call(messages=[{"role": "user", "content": "Hello, world!"}])
@@ -1,8 +1,6 @@
from typing import Optional
from unittest.mock import MagicMock, patch
from unittest.mock import patch

import pytest
from pydantic import BaseModel

from crewai.agent import Agent
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
@@ -45,12 +43,12 @@ class InternalCrewPlanner:
description="Task 1",
expected_output="Output 1",
agent=Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1"),
)
),
]
planning_agent_llm = "gpt-3.5-turbo"
return CrewPlanner(tasks, planning_agent_llm)

def test_handle_crew_planning(self, crew_planner):
def test_handle_crew_planning(self, crew_planner) -> None:
list_of_plans_per_task = [
PlanPerTask(task="Task1", plan="Plan 1"),
PlanPerTask(task="Task2", plan="Plan 2"),
@@ -61,7 +59,7 @@ class InternalCrewPlanner:
description="Description",
agent="agent",
pydantic=PlannerTaskPydanticOutput(
list_of_plans_per_task=list_of_plans_per_task
list_of_plans_per_task=list_of_plans_per_task,
),
)
result = crew_planner._handle_crew_planning()
@@ -70,12 +68,12 @@ class InternalCrewPlanner:
assert len(result.list_of_plans_per_task) == len(crew_planner.tasks)
execute.assert_called_once()

def test_create_planning_agent(self, crew_planner):
def test_create_planning_agent(self, crew_planner) -> None:
agent = crew_planner._create_planning_agent()
assert isinstance(agent, Agent)
assert agent.role == "Task Execution Planner"

def test_create_planner_task(self, crew_planner):
def test_create_planner_task(self, crew_planner) -> None:
planning_agent = Agent(
role="Planning Agent",
goal="Plan Step by Step Plan",
@@ -92,7 +90,7 @@ class InternalCrewPlanner:
== "Step by step plan on how the agents can execute their tasks using the available tools with mastery"
)

def test_create_tasks_summary(self, crew_planner):
def test_create_tasks_summary(self, crew_planner) -> None:
tasks_summary = crew_planner._create_tasks_summary()
assert isinstance(tasks_summary, str)
assert tasks_summary.startswith("\n Task Number 1 - Task 1")
@@ -100,8 +98,8 @@ class InternalCrewPlanner:
# Knowledge field should not be present when empty
assert '"agent_knowledge"' not in tasks_summary

@patch('crewai.knowledge.storage.knowledge_storage.chromadb')
def test_create_tasks_summary_with_knowledge_and_tools(self, mock_chroma):
@patch("crewai.knowledge.storage.knowledge_storage.chromadb")
def test_create_tasks_summary_with_knowledge_and_tools(self, mock_chroma) -> None:
"""Test task summary generation with both knowledge and tools present."""
# Mock ChromaDB collection
mock_collection = mock_chroma.return_value.get_or_create_collection.return_value
@@ -112,20 +110,20 @@ class InternalCrewPlanner:
name: str
description: str

def __init__(self, name: str, description: str):
def __init__(self, name: str, description: str) -> None:
tool_data = {"name": name, "description": description}
super().__init__(**tool_data)

def __str__(self):
def __str__(self) -> str:
return self.name

def __repr__(self):
def __repr__(self) -> str:
return self.name

def to_structured_tool(self):
return self

def _run(self, *args, **kwargs):
def _run(self, *args, **kwargs) -> None:
pass

def _generate_description(self) -> str:
@@ -145,9 +143,9 @@ class InternalCrewPlanner:
backstory="Test Backstory",
tools=[tool1, tool2],
knowledge_sources=[
StringKnowledgeSource(content="Test knowledge content")
]
)
StringKnowledgeSource(content="Test knowledge content"),
],
),
)

# Create planner with the new task
@@ -163,13 +161,13 @@ class InternalCrewPlanner:
assert task.agent.role in tasks_summary
assert task.agent.goal in tasks_summary

def test_handle_crew_planning_different_llm(self, crew_planner_different_llm):
def test_handle_crew_planning_different_llm(self, crew_planner_different_llm) -> None:
with patch.object(Task, "execute_sync") as execute:
execute.return_value = TaskOutput(
description="Description",
agent="agent",
pydantic=PlannerTaskPydanticOutput(
list_of_plans_per_task=[PlanPerTask(task="Task1", plan="Plan 1")]
list_of_plans_per_task=[PlanPerTask(task="Task1", plan="Plan 1")],
),
)
result = crew_planner_different_llm._handle_crew_planning()
@@ -177,6 +175,6 @@ class InternalCrewPlanner:
assert crew_planner_different_llm.planning_agent_llm == "gpt-3.5-turbo"
assert isinstance(result, PlannerTaskPydanticOutput)
assert len(result.list_of_plans_per_task) == len(
crew_planner_different_llm.tasks
crew_planner_different_llm.tasks,
)
execute.assert_called_once()

@@ -1,12 +1,10 @@
from typing import Any, Dict, List, Optional, Set, Tuple, Union

import pytest
from pydantic import BaseModel, Field
from pydantic import BaseModel

from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser


def test_simple_model():
def test_simple_model() -> None:
class SimpleModel(BaseModel):
field1: int
field2: str
@@ -21,7 +19,7 @@ def test_simple_model():
assert schema.strip() == expected_schema.strip()


def test_nested_model():
def test_nested_model() -> None:
class NestedModel(BaseModel):
nested_field: int

@@ -42,9 +40,9 @@ def test_nested_model():
assert schema.strip() == expected_schema.strip()


def test_model_with_list():
def test_model_with_list() -> None:
class ListModel(BaseModel):
list_field: List[int]
list_field: list[int]

parser = PydanticSchemaParser(model=ListModel)
schema = parser.get_schema()
@@ -55,9 +53,9 @@ def test_model_with_list():
assert schema.strip() == expected_schema.strip()


def test_model_with_optional_field():
def test_model_with_optional_field() -> None:
class OptionalModel(BaseModel):
optional_field: Optional[str]
optional_field: str | None

parser = PydanticSchemaParser(model=OptionalModel)
schema = parser.get_schema()
@@ -68,9 +66,9 @@ def test_model_with_optional_field():
assert schema.strip() == expected_schema.strip()


def test_model_with_union():
def test_model_with_union() -> None:
class UnionModel(BaseModel):
union_field: Union[int, str]
union_field: int | str

parser = PydanticSchemaParser(model=UnionModel)
schema = parser.get_schema()
@@ -81,9 +79,9 @@ def test_model_with_union():
assert schema.strip() == expected_schema.strip()


def test_model_with_dict():
def test_model_with_dict() -> None:
class DictModel(BaseModel):
dict_field: Dict[str, int]
dict_field: dict[str, int]

parser = PydanticSchemaParser(model=DictModel)
schema = parser.get_schema()

@@ -1,6 +1,4 @@
from datetime import date, datetime
from typing import List
from unittest.mock import Mock

import pytest
from pydantic import BaseModel
@@ -19,11 +17,11 @@ class Person(BaseModel):
age: int
address: Address
birthday: date
skills: List[str]
skills: list[str]


@pytest.mark.parametrize(
"test_input,expected",
("test_input", "expected"),
[
({"text": "hello world"}, {"text": "hello world"}),
({"number": 42}, {"number": 42}),
@@ -36,25 +34,25 @@ class Person(BaseModel):
({"nested": [1, [2, 3], {4, 5}]}, {"nested": [1, [2, 3], [4, 5]]}),
],
)
def test_basic_serialization(test_input, expected):
def test_basic_serialization(test_input, expected) -> None:
result = to_serializable(test_input)
assert result == expected


@pytest.mark.parametrize(
"input_date,expected",
("input_date", "expected"),
[
(date(2024, 1, 1), "2024-01-01"),
(datetime(2024, 1, 1, 12, 30), "2024-01-01T12:30:00"),
],
)
def test_temporal_serialization(input_date, expected):
def test_temporal_serialization(input_date, expected) -> None:
result = to_serializable({"date": input_date})
assert result["date"] == expected

@pytest.mark.parametrize(
"key,value,expected_key_type",
("key", "value", "expected_key_type"),
[
(("tuple", "key"), "value", str),
(None, "value", str),
@@ -62,7 +60,7 @@ def test_temporal_serialization(input_date, expected):
("normal", "value", str),
],
)
def test_dictionary_key_serialization(key, value, expected_key_type):
def test_dictionary_key_serialization(key, value, expected_key_type) -> None:
result = to_serializable({key: value})
assert len(result) == 1
result_key = next(iter(result.keys()))
@@ -71,19 +69,19 @@ def test_dictionary_key_serialization(key, value, expected_key_type):


@pytest.mark.parametrize(
"callable_obj,expected_in_result",
("callable_obj", "expected_in_result"),
[
(lambda x: x * 2, "lambda"),
(str.upper, "upper"),
],
)
def test_callable_serialization(callable_obj, expected_in_result):
def test_callable_serialization(callable_obj, expected_in_result) -> None:
result = to_serializable({"func": callable_obj})
assert isinstance(result["func"], str)
assert expected_in_result in result["func"].lower()


def test_pydantic_model_serialization():
def test_pydantic_model_serialization() -> None:
address = Address(street="123 Main St", city="Tech City", country="Pythonia")

person = Person(
@@ -108,8 +106,8 @@ def test_pydantic_model_serialization():
)


def test_depth_limit():
"""Test max depth handling with a deeply nested structure"""
def test_depth_limit() -> None:
"""Test max depth handling with a deeply nested structure."""

def create_nested(depth):
if depth == 0:
@@ -124,15 +122,15 @@ def test_depth_limit():
"next": {
"next": {
"next": {
"next": "{'next': {'next': {'next': {'next': {'next': 'value'}}}}}"
}
}
}
}
"next": "{'next': {'next': {'next': {'next': {'next': 'value'}}}}}",
},
},
},
},
}


def test_exclude_keys():
def test_exclude_keys() -> None:
result = to_serializable({"key1": "value1", "key2": "value2"}, exclude={"key1"})
assert result == {"key2": "value2"}

@@ -1,4 +1,4 @@
from typing import Any, Dict, List, Union
from typing import Any

import pytest

@@ -8,10 +8,10 @@ from crewai.utilities.string_utils import interpolate_only
class TestInterpolateOnly:
"""Tests for the interpolate_only function in string_utils.py."""

def test_basic_variable_interpolation(self):
def test_basic_variable_interpolation(self) -> None:
"""Test basic variable interpolation works correctly."""
template = "Hello, {name}! Welcome to {company}."
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
inputs: dict[str, str | int | float | dict[str, Any] | list[Any]] = {
"name": "Alice",
"company": "CrewAI",
}
@@ -20,18 +20,18 @@ class TestInterpolateOnly:

assert result == "Hello, Alice! Welcome to CrewAI."

def test_multiple_occurrences_of_same_variable(self):
def test_multiple_occurrences_of_same_variable(self) -> None:
"""Test that multiple occurrences of the same variable are replaced."""
template = "{name} is using {name}'s account."
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"name": "Bob"
inputs: dict[str, str | int | float | dict[str, Any] | list[Any]] = {
"name": "Bob",
}

result = interpolate_only(template, inputs)

assert result == "Bob is using Bob's account."

def test_json_structure_preservation(self):
def test_json_structure_preservation(self) -> None:
"""Test that JSON structures are preserved and not interpolated incorrectly."""
template = """
Instructions for {agent}:
@@ -40,8 +40,8 @@ class TestInterpolateOnly:

{"name": "person's name", "age": 25, "skills": ["coding", "testing"]}
"""
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"agent": "DevAgent"
inputs: dict[str, str | int | float | dict[str, Any] | list[Any]] = {
"agent": "DevAgent",
}

result = interpolate_only(template, inputs)
@@ -52,7 +52,7 @@ class TestInterpolateOnly:
in result
)

def test_complex_nested_json(self):
def test_complex_nested_json(self) -> None:
"""Test with complex JSON structures containing curly braces."""
template = """
{agent} needs to process:
@@ -65,8 +65,8 @@ class TestInterpolateOnly:
}
}
"""
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"agent": "DataProcessor"
inputs: dict[str, str | int | float | dict[str, Any] | list[Any]] = {
"agent": "DataProcessor",
}

result = interpolate_only(template, inputs)
@@ -76,11 +76,11 @@ class TestInterpolateOnly:
assert '"value": 42' in result
assert '[1, 2, {"inner": "value"}]' in result

def test_missing_variable(self):
def test_missing_variable(self) -> None:
"""Test that an error is raised when a required variable is missing."""
template = "Hello, {name}!"
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"not_name": "Alice"
inputs: dict[str, str | int | float | dict[str, Any] | list[Any]] = {
"not_name": "Alice",
}

with pytest.raises(KeyError) as excinfo:
@@ -89,55 +89,55 @@ class TestInterpolateOnly:
assert "template variable" in str(excinfo.value).lower()
assert "name" in str(excinfo.value)

def test_invalid_input_types(self):
def test_invalid_input_types(self) -> None:
"""Test that an error is raised with invalid input types."""
template = "Hello, {name}!"
# Using Any for this test since we're intentionally testing an invalid type
inputs: Dict[str, Any] = {"name": object()} # Object is not a valid input type
inputs: dict[str, Any] = {"name": object()} # Object is not a valid input type

with pytest.raises(ValueError) as excinfo:
interpolate_only(template, inputs)

assert "unsupported type" in str(excinfo.value).lower()

def test_empty_input_string(self):
def test_empty_input_string(self) -> None:
"""Test handling of empty or None input string."""
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"name": "Alice"
inputs: dict[str, str | int | float | dict[str, Any] | list[Any]] = {
"name": "Alice",
}

assert interpolate_only("", inputs) == ""
assert interpolate_only(None, inputs) == ""

def test_no_variables_in_template(self):
def test_no_variables_in_template(self) -> None:
"""Test a template with no variables to replace."""
template = "This is a static string with no variables."
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"name": "Alice"
inputs: dict[str, str | int | float | dict[str, Any] | list[Any]] = {
"name": "Alice",
}

result = interpolate_only(template, inputs)

assert result == template

def test_variable_name_starting_with_underscore(self):
def test_variable_name_starting_with_underscore(self) -> None:
"""Test variables starting with underscore are replaced correctly."""
template = "Variable: {_special_var}"
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"_special_var": "Special Value"
inputs: dict[str, str | int | float | dict[str, Any] | list[Any]] = {
"_special_var": "Special Value",
}

result = interpolate_only(template, inputs)

assert result == "Variable: Special Value"

def test_preserves_non_matching_braces(self):
def test_preserves_non_matching_braces(self) -> None:
"""Test that non-matching braces patterns are preserved."""
template = (
"This {123} and {!var} should not be replaced but {valid_var} should."
)
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"valid_var": "works"
inputs: dict[str, str | int | float | dict[str, Any] | list[Any]] = {
"valid_var": "works",
}

result = interpolate_only(template, inputs)
@@ -146,15 +146,15 @@ class TestInterpolateOnly:
result == "This {123} and {!var} should not be replaced but works should."
)

def test_complex_mixed_scenario(self):
def test_complex_mixed_scenario(self) -> None:
"""Test a complex scenario with both valid variables and JSON structures."""
template = """
{agent_name} is working on task {task_id}.


Instructions:
1. Process the data
2. Return results as:


{
"taskId": "{task_id}",
"results": {
@@ -164,7 +164,7 @@ class TestInterpolateOnly:
}
}
"""
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
inputs: dict[str, str | int | float | dict[str, Any] | list[Any]] = {
"agent_name": "AnalyticsAgent",
"task_id": "T-12345",
}
@@ -176,10 +176,10 @@ class TestInterpolateOnly:
assert '"processed_by": "agent_name"' in result # This shouldn't be replaced
assert '"values": [1, 2, 3]' in result

def test_empty_inputs_dictionary(self):
def test_empty_inputs_dictionary(self) -> None:
"""Test that an error is raised with empty inputs dictionary."""
template = "Hello, {name}!"
inputs: Dict[str, Any] = {}
inputs: dict[str, Any] = {}

with pytest.raises(ValueError) as excinfo:
interpolate_only(template, inputs)

@@ -5,14 +5,14 @@ from crewai.utilities.training_handler import CrewTrainingHandler


class InternalCrewTrainingHandler(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
self.handler = CrewTrainingHandler("trained_data.pkl")

def tearDown(self):
def tearDown(self) -> None:
os.remove("trained_data.pkl")
del self.handler

def test_save_trained_data(self):
def test_save_trained_data(self) -> None:
agent_id = "agent1"
trained_data = {"param1": 1, "param2": 2}
self.handler.save_trained_data(agent_id, trained_data)
@@ -21,7 +21,7 @@ class InternalCrewTrainingHandler(unittest.TestCase):
data = self.handler.load()
assert data[agent_id] == trained_data

def test_append_existing_agent(self):
def test_append_existing_agent(self) -> None:
train_iteration = 1
agent_id = "agent1"
new_data = {"param3": 3, "param4": 4}
@@ -31,7 +31,7 @@ class InternalCrewTrainingHandler(unittest.TestCase):
data = self.handler.load()
assert data[agent_id][train_iteration] == new_data

def test_append_new_agent(self):
def test_append_new_agent(self) -> None:
train_iteration = 1
agent_id = "agent2"
new_data = {"param5": 5, "param6": 6}