From 19f4ef09825d5db95fae6b027cf979a947cbd052 Mon Sep 17 00:00:00 2001 From: Greyson Lalonde Date: Fri, 7 Nov 2025 17:01:30 -0500 Subject: [PATCH] chore: remove decorator kwarg to ensure global conftest applies --- .../firecrawl_crawl_website_tool_test.py | 2 +- .../firecrawl_scrape_website_tool_test.py | 2 +- .../tests/tools/firecrawl_search_tool_test.py | 2 +- lib/crewai/tests/agents/test_agent.py | 98 +- lib/crewai/tests/agents/test_lite_agent.py | 24 +- .../test_output_json_hierarchical.yaml | 1157 ++++++++++++++++- lib/crewai/tests/events/__init__.py | 0 .../evaluation/test_agent_evaluator.py | 6 +- lib/crewai/tests/knowledge/test_knowledge.py | 6 +- lib/crewai/tests/llms/anthropic/__init__.py | 0 lib/crewai/tests/llms/bedrock/__init__.py | 0 lib/crewai/tests/llms/google/__init__.py | 0 .../llms/hooks/test_openai_interceptor.py | 6 +- lib/crewai/tests/llms/openai/__init__.py | 0 lib/crewai/tests/llms/openai/test_openai.py | 4 +- .../tests/memory/test_external_memory.py | 4 +- lib/crewai/tests/project/__init__.py | 0 lib/crewai/tests/rag/config/__init__.py | 0 lib/crewai/tests/telemetry/test_telemetry.py | 2 +- lib/crewai/tests/test_crew.py | 160 +-- lib/crewai/tests/test_custom_llm.py | 4 +- lib/crewai/tests/test_llm.py | 36 +- lib/crewai/tests/test_project.py | 8 +- lib/crewai/tests/test_task.py | 50 +- lib/crewai/tests/test_task_guardrails.py | 6 +- .../tools/agent_tools/test_agent_tools.py | 12 +- lib/crewai/tests/tools/test_base_tool.py | 2 +- .../tests/tools/test_structured_tool.py | 8 +- lib/crewai/tests/tracing/test_tracing.py | 18 +- lib/crewai/tests/utilities/test_converter.py | 6 +- lib/crewai/tests/utilities/test_events.py | 44 +- 31 files changed, 1352 insertions(+), 315 deletions(-) create mode 100644 lib/crewai/tests/events/__init__.py create mode 100644 lib/crewai/tests/llms/anthropic/__init__.py create mode 100644 lib/crewai/tests/llms/bedrock/__init__.py create mode 100644 lib/crewai/tests/llms/google/__init__.py create mode 100644 lib/crewai/tests/llms/openai/__init__.py create mode 100644 lib/crewai/tests/project/__init__.py create mode 100644 lib/crewai/tests/rag/config/__init__.py diff --git a/lib/crewai-tools/tests/tools/firecrawl_crawl_website_tool_test.py b/lib/crewai-tools/tests/tools/firecrawl_crawl_website_tool_test.py index 1590a4a52..41417874e 100644 --- a/lib/crewai-tools/tests/tools/firecrawl_crawl_website_tool_test.py +++ b/lib/crewai-tools/tests/tools/firecrawl_crawl_website_tool_test.py @@ -4,7 +4,7 @@ from crewai_tools.tools.firecrawl_crawl_website_tool.firecrawl_crawl_website_too FirecrawlCrawlWebsiteTool, ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_firecrawl_crawl_tool_integration(): tool = FirecrawlCrawlWebsiteTool(config={ "limit": 2, diff --git a/lib/crewai-tools/tests/tools/firecrawl_scrape_website_tool_test.py b/lib/crewai-tools/tests/tools/firecrawl_scrape_website_tool_test.py index 70f1cf2e1..6c0a05825 100644 --- a/lib/crewai-tools/tests/tools/firecrawl_scrape_website_tool_test.py +++ b/lib/crewai-tools/tests/tools/firecrawl_scrape_website_tool_test.py @@ -4,7 +4,7 @@ from crewai_tools.tools.firecrawl_scrape_website_tool.firecrawl_scrape_website_t FirecrawlScrapeWebsiteTool, ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_firecrawl_scrape_tool_integration(): tool = FirecrawlScrapeWebsiteTool() result = tool.run(url="https://firecrawl.dev") diff --git a/lib/crewai-tools/tests/tools/firecrawl_search_tool_test.py b/lib/crewai-tools/tests/tools/firecrawl_search_tool_test.py index e6294c084..217faa10b 100644 --- a/lib/crewai-tools/tests/tools/firecrawl_search_tool_test.py +++ b/lib/crewai-tools/tests/tools/firecrawl_search_tool_test.py @@ -3,7 +3,7 @@ import pytest from crewai_tools.tools.firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_firecrawl_search_tool_integration(): tool = FirecrawlSearchTool() result = tool.run(query="firecrawl") diff --git a/lib/crewai/tests/agents/test_agent.py b/lib/crewai/tests/agents/test_agent.py index 8f77e1ba4..c4ccd59ff 100644 --- a/lib/crewai/tests/agents/test_agent.py +++ b/lib/crewai/tests/agents/test_agent.py @@ -147,7 +147,7 @@ def test_custom_llm(): assert agent.llm.model == "gpt-4" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_execution(): agent = Agent( role="test role", @@ -166,7 +166,7 @@ def test_agent_execution(): assert output == "1 + 1 is 2" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_execution_with_tools(): @tool def multiplier(first_number: int, second_number: int) -> float: @@ -211,7 +211,7 @@ def test_agent_execution_with_tools(): assert received_events[0].tool_args == {"first_number": 3, "second_number": 4} -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_logging_tool_usage(): @tool def multiplier(first_number: int, second_number: int) -> float: @@ -245,7 +245,7 @@ def test_logging_tool_usage(): assert agent.tools_handler.last_used_tool.arguments == tool_usage.arguments -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_cache_hitting(): @tool def multiplier(first_number: int, second_number: int) -> float: @@ -330,7 +330,7 @@ def test_cache_hitting(): assert received_events[0].from_cache -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_disabling_cache_for_agent(): @tool def multiplier(first_number: int, second_number: int) -> float: @@ -394,7 +394,7 @@ def test_disabling_cache_for_agent(): read.assert_not_called() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_execution_with_specific_tools(): @tool def multiplier(first_number: int, second_number: int) -> float: @@ -417,7 +417,7 @@ def test_agent_execution_with_specific_tools(): assert output == "The result of the multiplication is 12." -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool(): @tool def multiplier(first_number: int, second_number: int) -> float: @@ -443,7 +443,7 @@ def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool(): assert output == "12" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_powered_by_new_o_model_family_that_uses_tool(): @tool def comapny_customer_data() -> str: @@ -469,7 +469,7 @@ def test_agent_powered_by_new_o_model_family_that_uses_tool(): assert output == "42" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_custom_max_iterations(): @tool def get_final_answer() -> float: @@ -511,7 +511,7 @@ def test_agent_custom_max_iterations(): assert call_count == 3 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_repeated_tool_usage(capsys): """Test that agents handle repeated tool usage appropriately. @@ -560,7 +560,7 @@ def test_agent_repeated_tool_usage(capsys): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys): @tool def get_final_answer(anything: str) -> float: @@ -603,7 +603,7 @@ def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_moved_on_after_max_iterations(): @tool def get_final_answer() -> float: @@ -630,7 +630,7 @@ def test_agent_moved_on_after_max_iterations(): assert output == "42" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_respect_the_max_rpm_set(capsys): @tool def get_final_answer() -> float: @@ -664,7 +664,7 @@ def test_agent_respect_the_max_rpm_set(capsys): moveon.assert_called() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys): from unittest.mock import patch @@ -702,7 +702,7 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys): moveon.assert_not_called() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_without_max_rpm_respects_crew_rpm(capsys): from unittest.mock import patch @@ -762,7 +762,7 @@ def test_agent_without_max_rpm_respects_crew_rpm(capsys): moveon.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_error_on_parsing_tool(capsys): from unittest.mock import patch @@ -805,7 +805,7 @@ def test_agent_error_on_parsing_tool(capsys): assert "Error on parsing tool." in captured.out -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_remembers_output_format_after_using_tools_too_many_times(): from unittest.mock import patch @@ -840,7 +840,7 @@ def test_agent_remembers_output_format_after_using_tools_too_many_times(): remember_format.assert_called() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_use_specific_tasks_output_as_context(capsys): agent1 = Agent(role="test role", goal="test goal", backstory="test backstory") agent2 = Agent(role="test role2", goal="test goal2", backstory="test backstory2") @@ -867,7 +867,7 @@ def test_agent_use_specific_tasks_output_as_context(capsys): assert "hi" in result.raw.lower() or "hello" in result.raw.lower() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_step_callback(): class StepCallback: def callback(self, step): @@ -901,7 +901,7 @@ def test_agent_step_callback(): callback.assert_called() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_function_calling_llm(): from crewai.llm import LLM llm = LLM(model="gpt-4o", is_litellm=True) @@ -948,7 +948,7 @@ def test_agent_function_calling_llm(): mock_original_tool_calling.assert_called() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_tool_result_as_answer_is_the_final_answer_for_the_agent(): from crewai.tools import BaseTool @@ -978,7 +978,7 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent(): assert result.raw == "Howdy!" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_tool_usage_information_is_appended_to_agent(): from crewai.tools import BaseTool @@ -1033,7 +1033,7 @@ def test_agent_definition_based_on_dict(): # test for human input -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_human_input(): # Agent configuration config = { @@ -1181,7 +1181,7 @@ Thought:<|eot_id|> assert mock_format_prompt.return_value == expected_prompt -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_allow_crewai_trigger_context(): from crewai import Crew @@ -1202,7 +1202,7 @@ def test_task_allow_crewai_trigger_context(): assert "Trigger Payload: Important context data" in prompt -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_without_allow_crewai_trigger_context(): from crewai import Crew @@ -1225,7 +1225,7 @@ def test_task_without_allow_crewai_trigger_context(): assert "Important context data" not in prompt -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_allow_crewai_trigger_context_no_payload(): from crewai import Crew @@ -1247,7 +1247,7 @@ def test_task_allow_crewai_trigger_context_no_payload(): assert "Trigger Payload:" not in prompt -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical(): from crewai import Crew @@ -1276,7 +1276,7 @@ def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical(): assert "Trigger Payload: Initial context data" not in first_prompt -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_first_task_auto_inject_trigger(): from crewai import Crew @@ -1309,7 +1309,7 @@ def test_first_task_auto_inject_trigger(): assert "Trigger Payload:" not in second_prompt -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_ensure_first_task_allow_crewai_trigger_context_is_false_does_not_inject(): from crewai import Crew @@ -1514,7 +1514,7 @@ def test_agent_with_additional_kwargs(): assert agent.llm.frequency_penalty == 0.1 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call(): llm = LLM(model="gpt-3.5-turbo") messages = [{"role": "user", "content": "Say 'Hello, World!'"}] @@ -1523,7 +1523,7 @@ def test_llm_call(): assert "Hello, World!" in response -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call_with_error(): llm = LLM(model="non-existent-model") messages = [{"role": "user", "content": "This should fail"}] @@ -1532,7 +1532,7 @@ def test_llm_call_with_error(): llm.call(messages) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_handle_context_length_exceeds_limit(): # Import necessary modules from crewai.utilities.agent_utils import handle_context_length @@ -1585,7 +1585,7 @@ def test_handle_context_length_exceeds_limit(): mock_summarize.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_handle_context_length_exceeds_limit_cli_no(): agent = Agent( role="test role", @@ -1660,7 +1660,7 @@ def test_agent_with_all_llm_attributes(): assert agent.llm.api_key == "sk-your-api-key-here" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call_with_all_attributes(): llm = LLM( model="gpt-3.5-turbo", @@ -1677,7 +1677,7 @@ def test_llm_call_with_all_attributes(): assert "STOP" not in response -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_with_ollama_llama3(): agent = Agent( role="test role", @@ -1698,7 +1698,7 @@ def test_agent_with_ollama_llama3(): assert "Llama3" in response or "AI" in response or "language model" in response -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call_with_ollama_llama3(): llm = LLM( model="ollama/llama3.2:3b", @@ -1717,7 +1717,7 @@ def test_llm_call_with_ollama_llama3(): assert "Llama3" in response or "AI" in response or "language model" in response -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_execute_task_basic(): agent = Agent( role="test role", @@ -1736,7 +1736,7 @@ def test_agent_execute_task_basic(): assert "4" in result -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_execute_task_with_context(): agent = Agent( role="test role", @@ -1758,7 +1758,7 @@ def test_agent_execute_task_with_context(): assert "fox" in result.lower() and "dog" in result.lower() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_execute_task_with_tool(): @tool def dummy_tool(query: str) -> str: @@ -1783,7 +1783,7 @@ def test_agent_execute_task_with_tool(): assert "Dummy result for: test query" in result -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_execute_task_with_custom_llm(): agent = Agent( role="test role", @@ -1804,7 +1804,7 @@ def test_agent_execute_task_with_custom_llm(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_execute_task_with_ollama(): agent = Agent( role="test role", @@ -1824,7 +1824,7 @@ def test_agent_execute_task_with_ollama(): assert "AI" in result or "artificial intelligence" in result.lower() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_with_knowledge_sources(): content = "Brandon's favorite color is red and he likes Mexican food." string_source = StringKnowledgeSource(content=content) @@ -1856,7 +1856,7 @@ def test_agent_with_knowledge_sources(): assert "red" in result.raw.lower() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold(): content = "Brandon's favorite color is red and he likes Mexican food." string_source = StringKnowledgeSource(content=content) @@ -1904,7 +1904,7 @@ def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold_default(): content = "Brandon's favorite color is red and he likes Mexican food." string_source = StringKnowledgeSource(content=content) @@ -1953,7 +1953,7 @@ def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold_defau ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_with_knowledge_sources_extensive_role(): content = "Brandon's favorite color is red and he likes Mexican food." string_source = StringKnowledgeSource(content=content) @@ -1989,7 +1989,7 @@ def test_agent_with_knowledge_sources_extensive_role(): assert "red" in result.raw.lower() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_with_knowledge_sources_works_with_copy(): content = "Brandon's favorite color is red and he likes Mexican food." string_source = StringKnowledgeSource(content=content) @@ -2028,7 +2028,7 @@ def test_agent_with_knowledge_sources_works_with_copy(): assert isinstance(agent_copy.llm, BaseLLM) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_with_knowledge_sources_generate_search_query(): content = "Brandon's favorite color is red and he likes Mexican food." string_source = StringKnowledgeSource(content=content) @@ -2162,7 +2162,7 @@ def test_agent_knowledege_with_crewai_knowledge(): crew_knowledge.query.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_litellm_auth_error_handling(): """Test that LiteLLM authentication errors are handled correctly and not retried.""" from litellm import AuthenticationError as LiteLLMAuthenticationError @@ -2291,7 +2291,7 @@ def test_litellm_anthropic_error_handling(): mock_llm_call.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_get_knowledge_search_query(): """Test that _get_knowledge_search_query calls the LLM with the correct prompts.""" from crewai.utilities.i18n import I18N diff --git a/lib/crewai/tests/agents/test_lite_agent.py b/lib/crewai/tests/agents/test_lite_agent.py index 0c6b00c23..c51fb5c0d 100644 --- a/lib/crewai/tests/agents/test_lite_agent.py +++ b/lib/crewai/tests/agents/test_lite_agent.py @@ -70,7 +70,7 @@ class ResearchResult(BaseModel): sources: list[str] = Field(description="List of sources used") -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() @pytest.mark.parametrize("verbose", [True, False]) def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose): """Test that LiteAgent is created with the correct parameters when Agent.kickoff() is called.""" @@ -130,7 +130,7 @@ def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose): assert created_lite_agent["response_format"] == TestResponse -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_lite_agent_with_tools(): """Test that Agent can use tools.""" # Create a LiteAgent with tools @@ -174,7 +174,7 @@ def test_lite_agent_with_tools(): assert event.tool_name == "search_web" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_lite_agent_structured_output(): """Test that Agent can return a simple structured output.""" @@ -217,7 +217,7 @@ def test_lite_agent_structured_output(): return result -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_lite_agent_returns_usage_metrics(): """Test that LiteAgent returns usage metrics.""" llm = LLM(model="gpt-4o-mini") @@ -238,7 +238,7 @@ def test_lite_agent_returns_usage_metrics(): assert result.usage_metrics["total_tokens"] > 0 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() @pytest.mark.asyncio async def test_lite_agent_returns_usage_metrics_async(): """Test that LiteAgent returns usage metrics when run asynchronously.""" @@ -333,7 +333,7 @@ def test_sets_parent_flow_when_inside_flow(): assert captured_agent.parent_flow is flow -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_guardrail_is_called_using_string(): guardrail_events = defaultdict(list) from crewai.events.event_types import ( @@ -387,7 +387,7 @@ def test_guardrail_is_called_using_string(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_guardrail_is_called_using_callable(): guardrail_events = defaultdict(list) from crewai.events.event_types import ( @@ -433,7 +433,7 @@ def test_guardrail_is_called_using_callable(): assert "Pelé - Santos, 1958" in result.raw -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_guardrail_reached_attempt_limit(): guardrail_events = defaultdict(list) from crewai.events.event_types import ( @@ -487,7 +487,7 @@ def test_guardrail_reached_attempt_limit(): assert not guardrail_events["completed"][2].success -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_output_when_guardrail_returns_base_model(): class Player(BaseModel): name: str @@ -578,7 +578,7 @@ def test_lite_agent_with_custom_llm_and_guardrails(): assert result2.raw == "Modified by guardrail" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_lite_agent_with_invalid_llm(): """Test that LiteAgent raises proper error when create_llm returns None.""" with patch("crewai.lite_agent.create_llm", return_value=None): @@ -594,7 +594,7 @@ def test_lite_agent_with_invalid_llm(): @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}) @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get") -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_kickoff_with_platform_tools(mock_get): """Test that Agent.kickoff() properly integrates platform tools with LiteAgent""" mock_response = Mock() @@ -636,7 +636,7 @@ def test_agent_kickoff_with_platform_tools(mock_get): @patch.dict("os.environ", {"EXA_API_KEY": "test_exa_key"}) @patch("crewai.agent.Agent._get_external_mcp_tools") -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_kickoff_with_mcp_tools(mock_get_mcp_tools): """Test that Agent.kickoff() properly integrates MCP tools with LiteAgent""" # Setup mock MCP tools - create a proper BaseTool instance diff --git a/lib/crewai/tests/cassettes/test_output_json_hierarchical.yaml b/lib/crewai/tests/cassettes/test_output_json_hierarchical.yaml index a9c905ac1..9e899b031 100644 --- a/lib/crewai/tests/cassettes/test_output_json_hierarchical.yaml +++ b/lib/crewai/tests/cassettes/test_output_json_hierarchical.yaml @@ -60,17 +60,17 @@ interactions: user-agent: - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - 1.109.1 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: @@ -98,7 +98,7 @@ interactions: weX9vb5FqmdqdBz9AAAA//8DAIPrE9IlBgAA headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Encoding: @@ -110,29 +110,25 @@ interactions: Server: - cloudflare Set-Cookie: - - __cf_bm=REDACTED; - path=/; expires=Wed, 05-Nov-25 22:41:14 GMT; domain=.api.openai.com; HttpOnly; - Secure; SameSite=None - - _cfuvid=REDACTED; - path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + - SET-COOKIE-XXX Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - '5689' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' x-envoy-upstream-service-time: @@ -140,19 +136,19 @@ interactions: x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '30000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '28535' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 2.928s + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK @@ -188,17 +184,17 @@ interactions: user-agent: - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - 1.109.1 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: @@ -226,7 +222,7 @@ interactions: bMPm/nZd3d9t3Ox19jcAAAD//wMA9SeHNpMFAAA= headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Encoding: @@ -238,29 +234,25 @@ interactions: Server: - cloudflare Set-Cookie: - - __cf_bm=REDACTED; - path=/; expires=Wed, 05-Nov-25 22:41:17 GMT; domain=.api.openai.com; HttpOnly; - Secure; SameSite=None - - _cfuvid=REDACTED; - path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + - SET-COOKIE-XXX Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - '2273' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' x-envoy-upstream-service-time: @@ -268,19 +260,19 @@ interactions: x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199720' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 84ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK @@ -357,24 +349,23 @@ interactions: content-type: - application/json cookie: - - __cf_bm=REDACTED; - _cfuvid=REDACTED + - COOKIE-XXX host: - api.openai.com user-agent: - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - 1.109.1 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: @@ -396,7 +387,7 @@ interactions: 8ndKUsWKfFxFTY0Y9OmyMPwKTH3VKNOSd16dzqtxldgWhXwtqMkwOSZ/AAAA//8DAA4vDfxnAwAA headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Encoding: @@ -408,23 +399,23 @@ interactions: Server: - cloudflare Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - '622' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' x-envoy-upstream-service-time: @@ -432,19 +423,1065 @@ interactions: x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '30000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '28885' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 2.23s + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"trace_id": "bdc2d87d-f10b-421b-b77f-efcae3c4a908", "execution_type": + "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, + "crew_name": "crew", "flow_name": null, "crewai_version": "1.4.0", "privacy_level": + "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": + 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-07T20:51:23.266995+00:00"}, + "ephemeral_trace_id": "bdc2d87d-f10b-421b-b77f-efcae3c4a908"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '488' + Content-Type: + - application/json + User-Agent: + - CrewAI-CLI/1.4.0 + X-Crewai-Version: + - 1.4.0 + authorization: + - AUTHORIZATION-XXX + method: POST + uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches + response: + body: + string: '{"id":"7b442078-edc0-4083-b2d5-628c042040b7","ephemeral_trace_id":"bdc2d87d-f10b-421b-b77f-efcae3c4a908","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.4.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.4.0","privacy_level":"standard"},"created_at":"2025-11-07T20:51:23.579Z","updated_at":"2025-11-07T20:51:23.579Z","access_code":"TRACE-4823dbe416","user_identifier":null}' + headers: + Connection: + - keep-alive + Content-Length: + - '515' + Content-Type: + - application/json; charset=utf-8 + Date: + - Fri, 07 Nov 2025 20:51:23 GMT + cache-control: + - no-store + content-security-policy: + - CSP-FILTERED + etag: + - ETAG-XXX + expires: + - '0' + permissions-policy: + - PERMISSIONS-POLICY-XXX + pragma: + - no-cache + referrer-policy: + - REFERRER-POLICY-XXX + strict-transport-security: + - STS-XXX + vary: + - Accept + x-content-type-options: + - X-CONTENT-TYPE-XXX + x-frame-options: + - X-FRAME-OPTIONS-XXX + x-permitted-cross-domain-policies: + - X-PERMITTED-XXX + x-request-id: + - X-REQUEST-ID-XXX + x-runtime: + - X-RUNTIME-XXX + x-xss-protection: + - X-XSS-PROTECTION-XXX + status: + code: 201 + message: Created +- request: + body: '{"messages":[{"role":"system","content":"You are Crew Manager. You are + a seasoned manager with a knack for getting the best out of your team.\nYou + are also known for your ability to delegate work to the right people, and to + ask the right questions to get the best out of your team.\nEven though you don''t + perform tasks by yourself, you have a lot of experience in the field, which + allows you to properly evaluate the work of your team members.\nYour personal + goal is: Manage the team to complete the task in the best way possible.\nYou + ONLY have access to the following tools, and should NEVER make up tools that + are not listed here:\n\nTool Name: Delegate work to coworker\nTool Arguments: + {''task'': {''description'': ''The task to delegate'', ''type'': ''str''}, ''context'': + {''description'': ''The context for the task'', ''type'': ''str''}, ''coworker'': + {''description'': ''The role/name of the coworker to delegate to'', ''type'': + ''str''}}\nTool Description: Delegate a specific task to one of the following + coworkers: Scorer\nThe input to this tool should be the coworker, the task you + want them to do, and ALL necessary context to execute the task, they know nothing + about the task, so share absolutely everything you know, don''t reference things + but instead explain them.\nTool Name: Ask question to coworker\nTool Arguments: + {''question'': {''description'': ''The question to ask'', ''type'': ''str''}, + ''context'': {''description'': ''The context for the question'', ''type'': ''str''}, + ''coworker'': {''description'': ''The role/name of the coworker to ask'', ''type'': + ''str''}}\nTool Description: Ask a specific question to one of the following + coworkers: Scorer\nThe input to this tool should be the coworker, the question + you have for them, and ALL necessary context to ask the question properly, they + know nothing about the question, so share absolutely everything you know, don''t + reference things but instead explain them.\n\nIMPORTANT: Use the following format + in your response:\n\n```\nThought: you should always think about what to do\nAction: + the action to take, only one name of [Delegate work to coworker, Ask question + to coworker], just the name, exactly as it''s written.\nAction Input: the input + to the action, just a simple JSON object, enclosed in curly braces, using \" + to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce + all necessary information is gathered, return the following format:\n\n```\nThought: + I now know the final answer\nFinal Answer: the final answer to the original + input question\n```"},{"role":"user","content":"\nCurrent Task: Give me an integer + score between 1-5 for the following title: ''The impact of AI in the future + of work''\n\nThis is the expected criteria for your final answer: The score + of the title.\nyou MUST return the actual complete content as the final answer, + not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI + schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": + \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nBegin! + This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '3433' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.109.1 + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.109.1 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//nFTBbuM2EL37Kwa8ZAvIhm042axv6e7FPbQBGiDo1gtjTI0krmkOQY7s + BEH+vSAlW3Z3D0UvgsA3M3zzZvjeRgDKlGoJSjcoeu/t+PPX383zH7/61Z2Or9PF57++HHZfn397 + fH6h+VwVKYO330nLKWuiee8tiWHXwToQCqWqs49389vF/d39IgN7LsmmtNrLeMHj+XS+GE/vx9O7 + PrFhoymqJfw9AgB4y99E0ZX0opYwLU4ne4oRa1LLcxCACmzTicIYTRR0oooB1OyEXGb91HBbN7KE + FTiiEoShJgF0YJxQTQGi5kBQcQBpCMSIJbh5agjM3qMW4AoeVmBchqtW2kDp7MhhdwNbjFQCO0CI + Gi1BFXgPs3TN7QSeGEoGaUwsYAWx4daWUJKlGoXyOQjGXYrmNsCfiUko4NgwmAiBomcXzdZ29OiA + tkUxru5YRrBm15WZrN3aPeg0lyV8OV2QKKbamtMfhVMIrJxvZQlva5WuX6slrNVj4IMp6UdltiRH + Igez8e3/VGkCKbCr1osQqLKkBRo+9hWq1gK6hFg6oJOLe0xMhQNZzOxTR20IlGICuTLmPHrxpCVH + xDPPayIF6KRnSSFpiDElRIitbgAjaIvByGsB5GqsaU9OilxZ2BuNNvHkIOg0TdaqgHW3Zi/S6fdM + N4EuZ4Q9+8TFSDw1pikX7WVrYxeaxEl/8TUK7U9b9MESRhkE+iXvFXzY89XpBB5TIJ3by733/aTm + z1IWmcnQIHhOD8Wg7VrN4xBAa2oX4WikOUudYHRitPHpxZ+k7SfQDz6p7C2mHquKdIrbvsLD6ixY + v4hZsW7d1+r98ukGqtqIyTlca+0FgM5xP95kGt965P1sE5ZrH3gb/5WqKuNMbDaBMLJLlhCFvcro + +wjgW7aj9sphlA+897IR3lG+7uNi0dVTgwEO6Hzam5USFrQD8OmcdlVwU5KgsfHC0ZRG3VA5pA72 + h21p+AIYXbT9I52f1e5aN67+L+UHQGvyQuXGByqNvm55CAv0PQ/652FnmTNhFSkcjKaNGAppFCVV + 2NrOu1W3+5vKuJqCD6Yz8MpvtrPFYj7/NMN7NXof/QMAAP//AwD4pWqWyQYAAA== + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 07 Nov 2025 20:51:30 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '6681' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '6822' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo + give my best complete final answer to the task respond using the exact following + format:\n\nThought: I now can give a great answer\nFinal Answer: Your final + answer must be the great and the most complete as possible, it must be outcome + described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent + Task: Provide an integer score between 1-5 for the title ''The impact of AI + in the future of work''. The score should reflect how impactful and relevant + the title is in relation to current trends and expectations for the future of + work, considering aspects such as clarity, engagement, and topical importance.\n\nThis + is the expected criteria for your final answer: Your best answer to your coworker + asking you this, accounting for the context shared.\nyou MUST return the actual + complete content as the final answer, not a summary.\n\nThis is the context + you''re working with:\nWe''re evaluating a title for its relevance and impact + using a scoring system from 1 (least impactful) to 5 (most impactful). Please + consider the clarity of the title, its engagement potential, and how it aligns + with current and anticipated future trends in the workplace affected by AI.\n\nBegin! + This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1516' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.109.1 + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.109.1 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//jFRNbxtHDL3rVxB7yUUSLEFybd3SAEF9aAs0bgqkDQRqhrvLej62JFeK + EPi/FzNSLKVNgV4WmHl85CO5bz5PABr2zQYa16O5OITZmw8/sX//268/fvhh9X3sF2/fvd+vfxnf + 3ez/etM108LIuz/J2RfW3OU4BDLO6QQ7ITQqWRff3S7Xq7vb+0UFYvYUCq0bbLaaL2aRE8+WN8v1 + 7GY1W6zO9D6zI2028PsEAOBz/RahydOnZgM30y83kVSxo2bzEgTQSA7lpkFVVsNkzfQCupyMUtX+ + 2Oex620DD5DyARwm6HhPgNCVBgCTHkj+SG85YYDX9VSCD3kMHtRlIbCewNgCwavHnoDjgM4gt/D6 + AThVuB1tFCp3hyxPrwBhPYfHFx4ruEAoU/As5CwcAb0XUuXUAYJxpHKXPPTc9eEIQoH2mAwsD+ym + 4HJS9iQlvs8HQDFu2TEG4GQUAneUXK3kBSMaOww1j/Y4FFYRBpT2LDlFSqa1HH0ayBmWveocHqwk + oNRhVyg7cjgqARsMkuNgCm4Uzsp2BNzl0WrzrsfUkZZxHDgE2BWV05qeDTBwlxQGkvbU+YGtL3mE + kkEX8g4DeFY3qhYVlWdCySug5LGcLhMuXbRZHE0BR8uxKj/Vqtsw6qTezeHnPQmGML3aH7VFA+/L + sF2OcUzs0EiBTaHNbjxV5zhkMUylSsSnMgo22JMcz8tvxwBtFsDRcxm71spCauTLL3EWG3CXBSLK + Exn4Y8LITufXf6pQOyoWu6QxhCsAU8rntRSPfDwjzy+uCLkbJO/0H9Sm5cTab4VQcyoOUMtDU9Hn + CcDH6r7xK0M1p+VuLT9RLbe8X5/yNRfXX9DF4v6MWjYMF2C1WE2/kXDryZCDXhm4ceh68hfqxe1l + pPkKmFy1/W8538p9ap1T93/SXwDnaDDy20HIs/u65UuYUHkV/yvsZcxVcKMke3a0NSYpq/DU4hhO + T1WjRzWK25ZTRzIIn96rdtiu3PJuvWjvbpfN5HnyNwAAAP//AwAWgXkHvgUAAA== + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 07 Nov 2025 20:51:33 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '2194' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '2391' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Crew Manager. You are + a seasoned manager with a knack for getting the best out of your team.\nYou + are also known for your ability to delegate work to the right people, and to + ask the right questions to get the best out of your team.\nEven though you don''t + perform tasks by yourself, you have a lot of experience in the field, which + allows you to properly evaluate the work of your team members.\nYour personal + goal is: Manage the team to complete the task in the best way possible.\nYou + ONLY have access to the following tools, and should NEVER make up tools that + are not listed here:\n\nTool Name: Delegate work to coworker\nTool Arguments: + {''task'': {''description'': ''The task to delegate'', ''type'': ''str''}, ''context'': + {''description'': ''The context for the task'', ''type'': ''str''}, ''coworker'': + {''description'': ''The role/name of the coworker to delegate to'', ''type'': + ''str''}}\nTool Description: Delegate a specific task to one of the following + coworkers: Scorer\nThe input to this tool should be the coworker, the task you + want them to do, and ALL necessary context to execute the task, they know nothing + about the task, so share absolutely everything you know, don''t reference things + but instead explain them.\nTool Name: Ask question to coworker\nTool Arguments: + {''question'': {''description'': ''The question to ask'', ''type'': ''str''}, + ''context'': {''description'': ''The context for the question'', ''type'': ''str''}, + ''coworker'': {''description'': ''The role/name of the coworker to ask'', ''type'': + ''str''}}\nTool Description: Ask a specific question to one of the following + coworkers: Scorer\nThe input to this tool should be the coworker, the question + you have for them, and ALL necessary context to ask the question properly, they + know nothing about the question, so share absolutely everything you know, don''t + reference things but instead explain them.\n\nIMPORTANT: Use the following format + in your response:\n\n```\nThought: you should always think about what to do\nAction: + the action to take, only one name of [Delegate work to coworker, Ask question + to coworker], just the name, exactly as it''s written.\nAction Input: the input + to the action, just a simple JSON object, enclosed in curly braces, using \" + to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce + all necessary information is gathered, return the following format:\n\n```\nThought: + I now know the final answer\nFinal Answer: the final answer to the original + input question\n```"},{"role":"user","content":"\nCurrent Task: Give me an integer + score between 1-5 for the following title: ''The impact of AI in the future + of work''\n\nThis is the expected criteria for your final answer: The score + of the title.\nyou MUST return the actual complete content as the final answer, + not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI + schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": + \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nBegin! + This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"},{"role":"assistant","content":"Thought: + I need to get an integer score for the title ''The impact of AI in the future + of work'' based on a scale from 1 to 5. To do this, I should delegate this task + to our Scorer, who is responsible for evaluating titles like this.\n\nAction: + Delegate work to coworker\nAction Input: {\"task\": \"Provide an integer score + between 1-5 for the title ''The impact of AI in the future of work''. The score + should reflect how impactful and relevant the title is in relation to current + trends and expectations for the future of work, considering aspects such as + clarity, engagement, and topical importance.\", \"context\": \"We''re evaluating + a title for its relevance and impact using a scoring system from 1 (least impactful) + to 5 (most impactful). Please consider the clarity of the title, its engagement + potential, and how it aligns with current and anticipated future trends in the + workplace affected by AI.\", \"coworker\": \"Scorer\"}\nObservation: I would + score the title ''The impact of AI in the future of work'' a 5. The title is + clear, directly addressing a timely and highly relevant topic, considering how + artificial intelligence is dramatically reshaping work environments and expectations. + It is engaging because it prompts curiosity about the changes AI will bring, + and it aligns perfectly with current global discussions and trends around the + future workforce, automation, and AI integration. Overall, the title effectively + communicates its focus and importance, making it very impactful for audiences + interested in future labor market dynamics."}],"model":"gpt-4o"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '5019' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.109.1 + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.109.1 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//jFJBbtswELzrFcSerUB2LdnWLSgQpEDRU9FD6kCgqZXEmiIZcuW0Nfz3 + grRjKWkK9EKAnJ3hzO4eE8ZA1lAyEB0n0VuVfnz4Ipv9Qj/c/5ZPd0/Z/bdDvbn9TIeu2FiYBYbZ + /UBBL6wbYXqrkKTRZ1g45IRBdb4qFvlyXWw+RKA3NapAay2lS5MussUyzdZpVlyInZECPZTse8IY + Y8d4Bou6xp9Qsmz28tKj97xFKK9FjIEzKrwA91564ppgNoLCaEIdXX/tzNB2VLJPTJtntg8Hdcga + qbliXPtndDdbfRevt/FasuMWvDAOt1Cy/DRVdtgMnodgelBqAnCtDfHQmJjp8YKcrimUaa0zO/+G + Co3U0neVQ+6NDo49GQsRPSWMPcZuDa8aANaZ3lJFZo/xu3mWF2dBGAc0gdcXkAxxNaWtlrN3FKsa + iUvlJx0HwUWH9cgdx8OHWpoJkExy/23nPe1zdqnb/5EfASHQEtaVdVhL8TryWOYwLPC/yq59jobB + oztIgRVJdGEWNTZ8UOfdAv/LE/ZVI3WLzjp5XrDGVmLXzFfrPC9WkJySPwAAAP//AwDFP8S9aQMA + AA== + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 07 Nov 2025 20:51:34 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '603' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '626' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"trace_id": "037a11f7-d961-4c08-a000-0cda035d3159", "execution_type": + "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, + "crew_name": "crew", "flow_name": null, "crewai_version": "1.4.0", "privacy_level": + "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": + 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-07T21:02:04.522932+00:00"}, + "ephemeral_trace_id": "037a11f7-d961-4c08-a000-0cda035d3159"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '488' + Content-Type: + - application/json + User-Agent: + - CrewAI-CLI/1.4.0 + X-Crewai-Version: + - 1.4.0 + authorization: + - AUTHORIZATION-XXX + method: POST + uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches + response: + body: + string: '{"id":"86012cb0-2c2f-40ac-b1fc-ca88b254c74b","ephemeral_trace_id":"037a11f7-d961-4c08-a000-0cda035d3159","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.4.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.4.0","privacy_level":"standard"},"created_at":"2025-11-07T21:02:04.880Z","updated_at":"2025-11-07T21:02:04.880Z","access_code":"TRACE-e9a840c256","user_identifier":null}' + headers: + Connection: + - keep-alive + Content-Length: + - '515' + Content-Type: + - application/json; charset=utf-8 + Date: + - Fri, 07 Nov 2025 21:02:04 GMT + cache-control: + - no-store + content-security-policy: + - CSP-FILTERED + etag: + - ETAG-XXX + expires: + - '0' + permissions-policy: + - PERMISSIONS-POLICY-XXX + pragma: + - no-cache + referrer-policy: + - REFERRER-POLICY-XXX + strict-transport-security: + - STS-XXX + vary: + - Accept + x-content-type-options: + - X-CONTENT-TYPE-XXX + x-frame-options: + - X-FRAME-OPTIONS-XXX + x-permitted-cross-domain-policies: + - X-PERMITTED-XXX + x-request-id: + - X-REQUEST-ID-XXX + x-runtime: + - X-RUNTIME-XXX + x-xss-protection: + - X-XSS-PROTECTION-XXX + status: + code: 201 + message: Created +- request: + body: '{"messages":[{"role":"system","content":"You are Crew Manager. You are + a seasoned manager with a knack for getting the best out of your team.\nYou + are also known for your ability to delegate work to the right people, and to + ask the right questions to get the best out of your team.\nEven though you don''t + perform tasks by yourself, you have a lot of experience in the field, which + allows you to properly evaluate the work of your team members.\nYour personal + goal is: Manage the team to complete the task in the best way possible.\nYou + ONLY have access to the following tools, and should NEVER make up tools that + are not listed here:\n\nTool Name: Delegate work to coworker\nTool Arguments: + {''task'': {''description'': ''The task to delegate'', ''type'': ''str''}, ''context'': + {''description'': ''The context for the task'', ''type'': ''str''}, ''coworker'': + {''description'': ''The role/name of the coworker to delegate to'', ''type'': + ''str''}}\nTool Description: Delegate a specific task to one of the following + coworkers: Scorer\nThe input to this tool should be the coworker, the task you + want them to do, and ALL necessary context to execute the task, they know nothing + about the task, so share absolutely everything you know, don''t reference things + but instead explain them.\nTool Name: Ask question to coworker\nTool Arguments: + {''question'': {''description'': ''The question to ask'', ''type'': ''str''}, + ''context'': {''description'': ''The context for the question'', ''type'': ''str''}, + ''coworker'': {''description'': ''The role/name of the coworker to ask'', ''type'': + ''str''}}\nTool Description: Ask a specific question to one of the following + coworkers: Scorer\nThe input to this tool should be the coworker, the question + you have for them, and ALL necessary context to ask the question properly, they + know nothing about the question, so share absolutely everything you know, don''t + reference things but instead explain them.\n\nIMPORTANT: Use the following format + in your response:\n\n```\nThought: you should always think about what to do\nAction: + the action to take, only one name of [Delegate work to coworker, Ask question + to coworker], just the name, exactly as it''s written.\nAction Input: the input + to the action, just a simple JSON object, enclosed in curly braces, using \" + to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce + all necessary information is gathered, return the following format:\n\n```\nThought: + I now know the final answer\nFinal Answer: the final answer to the original + input question\n```"},{"role":"user","content":"\nCurrent Task: Give me an integer + score between 1-5 for the following title: ''The impact of AI in the future + of work''\n\nThis is the expected criteria for your final answer: The score + of the title.\nyou MUST return the actual complete content as the final answer, + not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI + schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": + \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nBegin! + This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '3433' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.109.1 + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.109.1 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//lFRNbxoxEL3zK0a+9AIIEggJt35IbS699CP9oELGO7s7jdfj2rMkKMp/ + r+wFlvRDai9o5Tfz5r1nxg8DAEWFWoIytRbTeDt6+eVt/HFzM/s04w8fP9+8mNzNF9NXb27Enr89 + V8PUwZvvaOTQNTbceItC7DrYBNSCiXW6uDibz67Oz2YZaLhAm9oqL6MZj84mZ7PR5HI0udg31kwG + o1rC1wEAwEP+TRJdgfdqCZPh4aTBGHWFanksAlCBbTpROkaKop2oYQ8adoIuq35fc1vVsoRrcIgF + CEOBFistCFIjiI63wCVEw4Fc1Z2RWIRn72sEarw2kgqeXwO5DJettAHT2R2H22eJktsA7wwHDGO4 + hjuyFnzgLRUI2trclTXdCzg0yU/YQckhIU0iwHs07akkH9hjsLvxyq3cc5MSX8Krg/I0OLUZTl8Y + DiVw7XwrS3hYqcSyUktYqde0RdAOyAlWGLJVhA3KHaKD6Wh+UPKfxldqCCu199WNSp0Bf7QUsEEn + QDGb22rbHgP/v3C1K/oof5c+hI2OWAA7IIngOd07abvnHoKxOpDshpkooMWtdgbHkOZ3bLHm1haw + eZJRqk5yuks91KCLSVwC9p5S5oZdpAJDzECkylFJJo050qS1CViji7QhS7JLBo9pjA9J7i8zR9lN + XqnH0z92wLKNOu2Va609AbRzLFlOXqlve+TxuESWKx94E39pVSU5ivU6oI7s0sJEYa8y+jgA+JaX + tX2yf8oHbryshW8xj1vMZh2f6p+HHp3Or/aosGjbA1eT8+EfCNcFiiYbT/ZdGW1qLPrW/nHQbUF8 + AgxObP8u50/cnXVy1b/Q94Ax6AWLtQ9YkHlquS8LmJ7Pv5UdY86CVcSwJYNrIQzpKgosdWu7l03F + XRRs1iW5CoMP1D1vpV+bTTldXM7nFws1eBz8BAAA//8DACTaP5PnBQAA + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 07 Nov 2025 21:02:07 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '2911' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '2934' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo + give my best complete final answer to the task respond using the exact following + format:\n\nThought: I now can give a great answer\nFinal Answer: Your final + answer must be the great and the most complete as possible, it must be outcome + described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent + Task: Give an integer score between 1-5 for the title ''The impact of AI in + the future of work''\n\nThis is the expected criteria for your final answer: + Your best answer to your coworker asking you this, accounting for the context + shared.\nyou MUST return the actual complete content as the final answer, not + a summary.\n\nThis is the context you''re working with:\nThe requirement is + to evaluate the title ''The impact of AI in the future of work'' and provide + a score between 1-5, based on its potential impact, clarity, and relevance. + The score should be an integer and the Scorer should ensure the evaluation considers + the significance and the comprehensibility of the title.\n\nBegin! This is VERY + important to you, use the tools available and give your best Final Answer, your + job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1328' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.109.1 + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.109.1 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//jFTBbiM3DL37KwidWmA8iL1ONutbUKBtDm2BIsWirRcGLXFmmGikgcgZ + r7vIvy8kO7F3uwV6GWD0yMf3KFKfZgCGnVmDsR2q7Qc//+GvX2X84717/+fHm+mfn/z0y2P6Xa6k + D3QXTZUz4u6RrL5k1Tb2gyflGI6wTYRKmXXx9mZ5vXr3ZnlbgD468jmtHXS+qhfzngPPl1fL6/nV + ar5YndK7yJbErOHvGQDAp/LNQoOjj2YNV9XLSU8i2JJZvwYBmBR9PjEowqIY1FRn0MagFIr2hy6O + badruIcQ92AxQMsTAUKbDQAG2VPahB85oIe78peD93H0DsTGRKAdgbJ6go156Ai4H9AqxAbu7oFD + wZtRx0T5bB/T08YAwqqGh9dMFrCeMFVgY7AsVAEGB4k8TRi0AseJrPoDoHOJRDi0gCHXiin7K+HK + PfkDaBzY1nCv0HHbeW47laJCuA3csM3xHBo/UrB0FFrBvmPbZSEINo2W0RfOPbvM6VjsKEIOWGSk + CkgGykH+AIlaTC4rsh2GluTFdfbaxGSphp/jniZKFbCCLc3bEUjR5g/ZRooTOdgdYEeZqc+dLSUa + toC7OOpJIeZTlSx7Y4693phMywKJGkop52uE76hu6woe404qkCf2XioYUnSjVZ5YD9/X8NtECb0v + sqhpKCPZro1hosPXXbNUWsIChML+AGNwlPJ8Odx5qqDHp1ycFRBEUyyX5E4T0Yz+eNv15TAmakbB + vBFh9P4CwBCiYt6osgYfTsjz6+D72A4p7uSrVNNwYOm2iVBiyEMuGgdT0OcZwIeyYOMXO2OGFPtB + txqfqJRb3twe+cx5sc/oYvn2hGpU9Gfgzbvr6huEW0eK7OViR41F25E7p54XGkfH8QKYXdj+t5xv + cR+tc2j/D/0ZsJYGJbcdEjm2X1o+hyXKD99/hb22uQg2QmliS1tlSvkqHDU4+uNrZOQgSv224dBS + GhIfn6Rm2K7s8vZ60dzeLM3sefYZAAD//wMAb5Ssy6EFAAA= + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 07 Nov 2025 21:02:10 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '2101' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '2238' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Crew Manager. You are + a seasoned manager with a knack for getting the best out of your team.\nYou + are also known for your ability to delegate work to the right people, and to + ask the right questions to get the best out of your team.\nEven though you don''t + perform tasks by yourself, you have a lot of experience in the field, which + allows you to properly evaluate the work of your team members.\nYour personal + goal is: Manage the team to complete the task in the best way possible.\nYou + ONLY have access to the following tools, and should NEVER make up tools that + are not listed here:\n\nTool Name: Delegate work to coworker\nTool Arguments: + {''task'': {''description'': ''The task to delegate'', ''type'': ''str''}, ''context'': + {''description'': ''The context for the task'', ''type'': ''str''}, ''coworker'': + {''description'': ''The role/name of the coworker to delegate to'', ''type'': + ''str''}}\nTool Description: Delegate a specific task to one of the following + coworkers: Scorer\nThe input to this tool should be the coworker, the task you + want them to do, and ALL necessary context to execute the task, they know nothing + about the task, so share absolutely everything you know, don''t reference things + but instead explain them.\nTool Name: Ask question to coworker\nTool Arguments: + {''question'': {''description'': ''The question to ask'', ''type'': ''str''}, + ''context'': {''description'': ''The context for the question'', ''type'': ''str''}, + ''coworker'': {''description'': ''The role/name of the coworker to ask'', ''type'': + ''str''}}\nTool Description: Ask a specific question to one of the following + coworkers: Scorer\nThe input to this tool should be the coworker, the question + you have for them, and ALL necessary context to ask the question properly, they + know nothing about the question, so share absolutely everything you know, don''t + reference things but instead explain them.\n\nIMPORTANT: Use the following format + in your response:\n\n```\nThought: you should always think about what to do\nAction: + the action to take, only one name of [Delegate work to coworker, Ask question + to coworker], just the name, exactly as it''s written.\nAction Input: the input + to the action, just a simple JSON object, enclosed in curly braces, using \" + to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce + all necessary information is gathered, return the following format:\n\n```\nThought: + I now know the final answer\nFinal Answer: the final answer to the original + input question\n```"},{"role":"user","content":"\nCurrent Task: Give me an integer + score between 1-5 for the following title: ''The impact of AI in the future + of work''\n\nThis is the expected criteria for your final answer: The score + of the title.\nyou MUST return the actual complete content as the final answer, + not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI + schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": + \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nBegin! + This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"},{"role":"assistant","content":"Thought: + I need to delegate the task of scoring the title ''The impact of AI in the future + of work'' to our Scorer. I will provide all the context necessary for them to + execute the task properly.\n\nAction: Delegate work to coworker\nAction Input: + {\"task\": \"Give an integer score between 1-5 for the title ''The impact of + AI in the future of work''\", \"context\": \"The requirement is to evaluate + the title ''The impact of AI in the future of work'' and provide a score between + 1-5, based on its potential impact, clarity, and relevance. The score should + be an integer and the Scorer should ensure the evaluation considers the significance + and the comprehensibility of the title.\", \"coworker\": \"Scorer\"}\nObservation: + I would score the title \"The impact of AI in the future of work\" a 4. The + title is clear, concise, and relevant, directly addressing an important and + timely topic. It highlights the significant influence of AI, which is a crucial + and widely discussed issue, especially regarding changes in the workforce. However, + it could be slightly improved by being more specific about which aspects of + \"impact\" it is referring to (e.g., jobs, skills, productivity). Overall, it + effectively conveys the significance and is easily understandable, making it + a strong and impactful title."}],"model":"gpt-4o"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '4764' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.109.1 + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.109.1 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//jFJNj9MwEL3nV1g+N6gf6Qe5rZCQlgMHQIsEXUWuPUncOh5jTyio6n9H + dkuThV1pL5bsN+/5vZk5ZYxxrXjJuGwFyc6Z/N23j+G4fjg87JX6vJfhcNBfd/jpxwd3t7rnk8jA + 3R4k/WW9kdg5A6TRXmDpQRBE1dl6NV8WbxeLaQI6VGAirXGUF5jPp/Min27y6epKbFFLCLxk3zPG + GDulM1q0Cn7xkiWZ9NJBCKIBXt6KGOMeTXzhIgQdSFjikwGUaAlscv2lxb5pqWT3zOKRHeJBLbBa + W2GYsOEIfmvfp9tdupXstOVBooctL1lxHgt7qPsgYi7bGzMChLVIIvYlRXq8IudbCION87gL/1B5 + ra0ObeVBBLTRcCB0PKHnjLHH1Kz+SX7uPHaOKsIDpO9m0/niIsiH+YzgzRUkJGHGtGI2eUaxUkBC + mzBqOJdCtqAG7jAd0SuNIyAb5f7fznPal+zaNq+RHwApwRGoynlQWj6NPJR5iPv7Utmtz8kwD+B/ + agkVafBxFgpq0ZvLavHwOxB0Va1tA955fdmv2lVyV8/Wm+VytebZOfsDAAD//wMAwR0QkmgDAAA= + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 07 Nov 2025 21:02:11 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '510' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '528' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/events/__init__.py b/lib/crewai/tests/events/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai/tests/experimental/evaluation/test_agent_evaluator.py b/lib/crewai/tests/experimental/evaluation/test_agent_evaluator.py index 6d6fe66f8..55f6a2413 100644 --- a/lib/crewai/tests/experimental/evaluation/test_agent_evaluator.py +++ b/lib/crewai/tests/experimental/evaluation/test_agent_evaluator.py @@ -54,7 +54,7 @@ class TestAgentEvaluator: agent_evaluator.set_iteration(3) assert agent_evaluator._execution_state.iteration == 3 - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_evaluate_current_iteration(self, mock_crew): from crewai.events.types.task_events import TaskCompletedEvent @@ -126,7 +126,7 @@ class TestAgentEvaluator: ): assert isinstance(evaluator, expected_type) - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_eval_specific_agents_from_crew(self, mock_crew): from crewai.events.types.task_events import TaskCompletedEvent @@ -215,7 +215,7 @@ class TestAgentEvaluator: assert goal_alignment.raw_response is not None assert '"score": 5' in goal_alignment.raw_response - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_failed_evaluation(self, mock_crew): (agent,) = mock_crew.agents (task,) = mock_crew.tasks diff --git a/lib/crewai/tests/knowledge/test_knowledge.py b/lib/crewai/tests/knowledge/test_knowledge.py index b56d46a74..b0f35c4d9 100644 --- a/lib/crewai/tests/knowledge/test_knowledge.py +++ b/lib/crewai/tests/knowledge/test_knowledge.py @@ -53,7 +53,7 @@ def test_single_short_string(mock_vector_db): mock_vector_db.query.assert_called_once() -# @pytest.mark.vcr(filter_headers=["authorization"]) +# @pytest.mark.vcr() def test_single_2k_character_string(mock_vector_db): # Create a 2k character string with various facts about Brandon content = ( @@ -374,7 +374,7 @@ def test_multiple_2k_character_files(mock_vector_db, tmpdir): mock_vector_db.query.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_hybrid_string_and_files(mock_vector_db, tmpdir): # Create string sources string_contents = [ @@ -443,7 +443,7 @@ def test_pdf_knowledge_source(mock_vector_db): mock_vector_db.query.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_csv_knowledge_source(mock_vector_db, tmpdir): """Test CSVKnowledgeSource with a simple CSV file.""" diff --git a/lib/crewai/tests/llms/anthropic/__init__.py b/lib/crewai/tests/llms/anthropic/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai/tests/llms/bedrock/__init__.py b/lib/crewai/tests/llms/bedrock/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai/tests/llms/google/__init__.py b/lib/crewai/tests/llms/google/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai/tests/llms/hooks/test_openai_interceptor.py b/lib/crewai/tests/llms/hooks/test_openai_interceptor.py index 9c3b8537e..32d5a070e 100644 --- a/lib/crewai/tests/llms/hooks/test_openai_interceptor.py +++ b/lib/crewai/tests/llms/hooks/test_openai_interceptor.py @@ -55,7 +55,7 @@ class TestOpenAIInterceptorIntegration: assert llm.interceptor is interceptor - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_openai_call_with_interceptor_tracks_requests(self) -> None: """Test that interceptor tracks OpenAI API requests.""" interceptor = OpenAITestInterceptor() @@ -152,7 +152,7 @@ class TestOpenAILoggingInterceptor: assert llm.interceptor is interceptor assert isinstance(llm.interceptor, LoggingInterceptor) - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_logging_interceptor_tracks_details(self) -> None: """Test that logging interceptor tracks request/response details.""" interceptor = LoggingInterceptor() @@ -241,7 +241,7 @@ class TestOpenAIAuthInterceptor: assert "X-Organization-ID" in modified_request.headers assert modified_request.headers["X-Organization-ID"] == "test-org" - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_auth_interceptor_with_real_call(self) -> None: """Test that auth interceptor works with real OpenAI API call.""" interceptor = AuthInterceptor(api_key="custom-123", org_id="org-789") diff --git a/lib/crewai/tests/llms/openai/__init__.py b/lib/crewai/tests/llms/openai/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai/tests/llms/openai/test_openai.py b/lib/crewai/tests/llms/openai/test_openai.py index b25e617dc..9cb94cba7 100644 --- a/lib/crewai/tests/llms/openai/test_openai.py +++ b/lib/crewai/tests/llms/openai/test_openai.py @@ -34,7 +34,7 @@ def test_openai_completion_is_used_when_no_provider_prefix(): assert llm.provider == "openai" assert llm.model == "gpt-4o" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_openai_is_default_provider_without_explicit_llm_set_on_agent(): """ Test that OpenAI is the default provider when no explicit LLM is set on the agent @@ -302,7 +302,7 @@ def test_openai_completion_with_tools(): assert call_kwargs['tools'] is not None assert len(call_kwargs['tools']) > 0 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_openai_completion_call_returns_usage_metrics(): """ Test that OpenAICompletion.call returns usage metrics diff --git a/lib/crewai/tests/memory/test_external_memory.py b/lib/crewai/tests/memory/test_external_memory.py index ac8516bca..096e198ae 100644 --- a/lib/crewai/tests/memory/test_external_memory.py +++ b/lib/crewai/tests/memory/test_external_memory.py @@ -194,7 +194,7 @@ def test_crew_external_memory_reset(mem_type, crew_with_external_memory): @pytest.mark.parametrize("mem_method", ["search", "save"]) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_external_memory_save_with_memory_flag( mem_method, crew_with_external_memory ): @@ -206,7 +206,7 @@ def test_crew_external_memory_save_with_memory_flag( @pytest.mark.parametrize("mem_method", ["search", "save"]) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_external_memory_save_using_crew_without_memory_flag( mem_method, crew_with_external_memory_without_memory_flag ): diff --git a/lib/crewai/tests/project/__init__.py b/lib/crewai/tests/project/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai/tests/rag/config/__init__.py b/lib/crewai/tests/rag/config/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai/tests/telemetry/test_telemetry.py b/lib/crewai/tests/telemetry/test_telemetry.py index 2429a4ade..b7df3bc25 100644 --- a/lib/crewai/tests/telemetry/test_telemetry.py +++ b/lib/crewai/tests/telemetry/test_telemetry.py @@ -54,7 +54,7 @@ def test_telemetry_enabled_by_default(): "opentelemetry.exporter.otlp.proto.http.trace_exporter.OTLPSpanExporter.export", side_effect=Exception("Test exception"), ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_telemetry_fails_due_connect_timeout(export_mock, logger_mock): error = Exception("Test exception") export_mock.side_effect = error diff --git a/lib/crewai/tests/test_crew.py b/lib/crewai/tests/test_crew.py index 1a1db50af..a748fd9ea 100644 --- a/lib/crewai/tests/test_crew.py +++ b/lib/crewai/tests/test_crew.py @@ -286,7 +286,7 @@ def test_crew_config_with_wrong_keys(): Crew(process=Process.sequential, config=no_agents_config) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_creation(researcher, writer): tasks = [ Task( @@ -318,7 +318,7 @@ def test_crew_creation(researcher, writer): assert result.raw == expected_string_output -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_sync_task_execution(researcher, writer): tasks = [ Task( @@ -357,7 +357,7 @@ def test_sync_task_execution(researcher, writer): assert mock_execute_sync.call_count == len(tasks) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_hierarchical_process(researcher, writer): task = Task( description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", @@ -393,7 +393,7 @@ def test_manager_llm_requirement_for_hierarchical_process(researcher, writer): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_manager_agent_delegating_to_assigned_task_agent(researcher, writer): """ Test that the manager agent delegates to the assigned task agent. @@ -445,7 +445,7 @@ def test_manager_agent_delegating_to_assigned_task_agent(researcher, writer): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_manager_agent_delegating_to_all_agents(researcher, writer): """ Test that the manager agent delegates to all agents when none are specified. @@ -478,7 +478,7 @@ def test_manager_agent_delegating_to_all_agents(researcher, writer): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_manager_agent_delegates_with_varied_role_cases(): """ Test that the manager agent can delegate to agents regardless of case or whitespace variations in role names. @@ -555,7 +555,7 @@ def test_manager_agent_delegates_with_varied_role_cases(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_with_delegating_agents(ceo, writer): tasks = [ Task( @@ -579,7 +579,7 @@ def test_crew_with_delegating_agents(ceo, writer): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer): class TestToolInput(BaseModel): """Input schema for TestTool.""" @@ -635,7 +635,7 @@ def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer) ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer): class TestToolInput(BaseModel): """Input schema for TestTool.""" @@ -693,7 +693,7 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_tools_override_agent_tools(researcher): class TestToolInput(BaseModel): """Input schema for TestTool.""" @@ -742,7 +742,7 @@ def test_task_tools_override_agent_tools(researcher): assert isinstance(new_researcher.tools[0], TestTool) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_tools_override_agent_tools_with_allow_delegation(researcher, writer): """ Test that task tools override agent tools while preserving delegation tools when allow_delegation=True @@ -819,7 +819,7 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write assert isinstance(researcher_with_delegation.tools[0], TestTool) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_verbose_output(researcher, writer, capsys): tasks = [ Task( @@ -863,7 +863,7 @@ def test_crew_verbose_output(researcher, writer, capsys): assert crew_quiet.verbose is False -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_cache_hitting_between_agents(researcher, writer, ceo): @tool def multiplier(first_number: int, second_number: int) -> float: @@ -917,7 +917,7 @@ def test_cache_hitting_between_agents(researcher, writer, ceo): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_api_calls_throttling(capsys): @tool def get_final_answer() -> float: @@ -952,7 +952,7 @@ def test_api_calls_throttling(capsys): moveon.assert_called() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_kickoff_usage_metrics(): inputs = [ {"topic": "dog"}, @@ -987,7 +987,7 @@ def test_crew_kickoff_usage_metrics(): assert result.token_usage.cached_prompt_tokens == 0 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_kickoff_streaming_usage_metrics(): inputs = [ {"topic": "dog"}, @@ -1043,7 +1043,7 @@ def test_agents_rpm_is_never_set_if_crew_max_rpm_is_not_set(): assert agent._rpm_controller is None -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_sequential_async_task_execution_completion(researcher, writer): list_ideas = Task( description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.", @@ -1075,7 +1075,7 @@ def test_sequential_async_task_execution_completion(researcher, writer): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_single_task_with_async_execution(): researcher_agent = Agent( role="Researcher", @@ -1103,7 +1103,7 @@ def test_single_task_with_async_execution(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_three_task_with_async_execution(): researcher_agent = Agent( role="Researcher", @@ -1149,7 +1149,7 @@ def test_three_task_with_async_execution(): @pytest.mark.asyncio -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() async def test_crew_async_kickoff(): inputs = [ {"topic": "dog"}, @@ -1197,7 +1197,7 @@ async def test_crew_async_kickoff(): @pytest.mark.asyncio -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() async def test_async_task_execution_call_count(researcher, writer): list_ideas = Task( description="Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting.", @@ -1251,7 +1251,7 @@ async def test_async_task_execution_call_count(researcher, writer): assert mock_execute_sync.call_count == 1 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_kickoff_for_each_single_input(): """Tests if kickoff_for_each works with a single input.""" @@ -1275,7 +1275,7 @@ def test_kickoff_for_each_single_input(): assert len(results) == 1 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_kickoff_for_each_multiple_inputs(): """Tests if kickoff_for_each works with multiple inputs.""" @@ -1303,7 +1303,7 @@ def test_kickoff_for_each_multiple_inputs(): assert len(results) == len(inputs) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_kickoff_for_each_empty_input(): """Tests if kickoff_for_each handles an empty input list.""" agent = Agent( @@ -1323,7 +1323,7 @@ def test_kickoff_for_each_empty_input(): assert results == [] -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_kickoff_for_each_invalid_input(): """Tests if kickoff_for_each raises TypeError for invalid input types.""" @@ -1554,7 +1554,7 @@ def test_dont_set_agents_step_callback_if_already_set(): assert researcher_agent.step_callback is agent_callback -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_function_calling_llm(): llm = LLM(model="gpt-4o-mini") @@ -1583,7 +1583,7 @@ def test_crew_function_calling_llm(): assert result.raw == "Howdy!" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_with_no_arguments(): @tool def return_data() -> str: @@ -1649,7 +1649,7 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_delegation_is_not_enabled_if_there_are_only_one_agent(): researcher = Agent( role="Researcher", @@ -1670,7 +1670,7 @@ def test_delegation_is_not_enabled_if_there_are_only_one_agent(): assert task.tools == [] -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agents_do_not_get_delegation_tools_with_there_is_only_one_agent(): agent = Agent( role="Researcher", @@ -1688,7 +1688,7 @@ def test_agents_do_not_get_delegation_tools_with_there_is_only_one_agent(): assert len(agent.tools) == 0 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_sequential_crew_creation_tasks_without_agents(researcher): task = Task( description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", @@ -1711,7 +1711,7 @@ def test_sequential_crew_creation_tasks_without_agents(researcher): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_usage_metrics_are_captured_for_hierarchical_process(): agent = Agent( role="Researcher", @@ -1808,7 +1808,7 @@ def test_hierarchical_kickoff_usage_metrics_include_manager(researcher): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_hierarchical_crew_creation_tasks_with_agents(researcher, writer): """ Agents are not required for tasks in a hierarchical process but sometimes they are still added @@ -1861,7 +1861,7 @@ def test_hierarchical_crew_creation_tasks_with_agents(researcher, writer): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_hierarchical_crew_creation_tasks_with_async_execution(researcher, writer, ceo): """ Tests that async tasks in hierarchical crews are handled correctly with proper delegation tools @@ -1918,7 +1918,7 @@ def test_hierarchical_crew_creation_tasks_with_async_execution(researcher, write ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_hierarchical_crew_creation_tasks_with_sync_last(researcher, writer, ceo): """ Agents are not required for tasks in a hierarchical process but sometimes they are still added @@ -2006,7 +2006,7 @@ def test_crew_inputs_interpolate_both_agents_and_tasks_diff(): interpolate_task_inputs.assert_called() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_does_not_interpolate_without_inputs(): agent = Agent( role="{topic} Researcher", @@ -2133,7 +2133,7 @@ def test_task_same_callback_both_on_task_and_crew(): mock_callback.assert_called_once_with(list_ideas.output) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_tools_with_custom_caching(): @tool def multiplcation_tool(first_number: int, second_number: int) -> int: @@ -2205,7 +2205,7 @@ def test_tools_with_custom_caching(): assert result.raw == "3" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_conditional_task_uses_last_output(researcher, writer): """Test that conditional tasks use the last task output for condition evaluation.""" task1 = Task( @@ -2279,7 +2279,7 @@ def test_conditional_task_uses_last_output(researcher, writer): ) # Third task used first task's output -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_conditional_tasks_result_collection(researcher, writer): """Test that task outputs are properly collected based on execution status.""" task1 = Task( @@ -2360,7 +2360,7 @@ def test_conditional_tasks_result_collection(researcher, writer): ) # Third task executed -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_multiple_conditional_tasks(researcher, writer): """Test that having multiple conditional tasks in sequence works correctly.""" task1 = Task( @@ -2409,7 +2409,7 @@ def test_multiple_conditional_tasks(researcher, writer): assert len(result.tasks_output) == 3 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_using_contextual_memory(): math_researcher = Agent( role="Researcher", @@ -2437,7 +2437,7 @@ def test_using_contextual_memory(): contextual_mem.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_memory_events_are_emitted(): events = defaultdict(list) condition = threading.Condition() @@ -2534,7 +2534,7 @@ def test_memory_events_are_emitted(): assert len(events["MemoryRetrievalCompletedEvent"]) == 1 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_using_contextual_memory_with_long_term_memory(): math_researcher = Agent( role="Researcher", @@ -2563,7 +2563,7 @@ def test_using_contextual_memory_with_long_term_memory(): assert crew.memory is False -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_warning_long_term_memory_without_entity_memory(): math_researcher = Agent( role="Researcher", @@ -2598,7 +2598,7 @@ def test_warning_long_term_memory_without_entity_memory(): save_memory.assert_not_called() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_long_term_memory_with_memory_flag(): math_researcher = Agent( role="Researcher", @@ -2628,7 +2628,7 @@ def test_long_term_memory_with_memory_flag(): save_memory.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_using_contextual_memory_with_short_term_memory(): math_researcher = Agent( role="Researcher", @@ -2657,7 +2657,7 @@ def test_using_contextual_memory_with_short_term_memory(): assert crew.memory is False -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_disabled_memory_using_contextual_memory(): math_researcher = Agent( role="Researcher", @@ -2685,7 +2685,7 @@ def test_disabled_memory_using_contextual_memory(): contextual_mem.assert_not_called() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_log_file_output(tmp_path, researcher): test_file = tmp_path / "logs.txt" tasks = [ @@ -2701,7 +2701,7 @@ def test_crew_log_file_output(tmp_path, researcher): assert test_file.exists() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_output_file_end_to_end(tmp_path): """Test output file functionality in a full crew context.""" # Create an agent @@ -2784,7 +2784,7 @@ def test_crew_output_file_validation_failures(): Crew(agents=[agent], tasks=[task]).kickoff() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_manager_agent(researcher, writer): task = Task( description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", @@ -2843,7 +2843,7 @@ def test_manager_agent_in_agents_raises_exception(researcher, writer): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_manager_agent_with_tools_raises_exception(researcher, writer): @tool def testing_tool(first_number: int, second_number: int) -> int: @@ -2874,7 +2874,7 @@ def test_manager_agent_with_tools_raises_exception(researcher, writer): crew.kickoff() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_train_success(researcher, writer, monkeypatch): task = Task( description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", @@ -2922,7 +2922,7 @@ def test_crew_train_success(researcher, writer, monkeypatch): assert isinstance(received_events[1], CrewTrainCompletedEvent) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_train_error(researcher, writer): task = Task( description="Come up with a list of 5 interesting ideas to explore for an article", @@ -2972,7 +2972,7 @@ def test__setup_for_training(researcher, writer): assert agent.allow_delegation is False -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_replay_feature(researcher, writer): list_ideas = Task( description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.", @@ -3009,7 +3009,7 @@ def test_replay_feature(researcher, writer): assert mock_execute_task.call_count == 3 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_replay_error(researcher, writer): task = Task( description="Come up with a list of 5 interesting ideas to explore for an article", @@ -3027,7 +3027,7 @@ def test_crew_replay_error(researcher, writer): assert "task_id is required" in str(e) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_task_db_init(): agent = Agent( role="Content Writer", @@ -3065,7 +3065,7 @@ def test_crew_task_db_init(): pytest.fail(f"An exception was raised: {e!s}") -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_replay_task_with_context(): agent1 = Agent( role="Researcher", @@ -3164,7 +3164,7 @@ def test_replay_task_with_context(): db_handler.reset() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_replay_with_context(): agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal") task1 = Task( @@ -3222,7 +3222,7 @@ def test_replay_with_context(): assert crew.tasks[1].context[0].output.raw == "context raw output" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_replay_with_context_set_to_nullable(): agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal") task1 = Task( @@ -3247,7 +3247,7 @@ def test_replay_with_context_set_to_nullable(): mock_execute_task.assert_called_with(agent=ANY, context="", tools=ANY) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_replay_with_invalid_task_id(): agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal") task1 = Task( @@ -3309,7 +3309,7 @@ def test_replay_with_invalid_task_id(): crew.replay("bf5b09c9-69bd-4eb8-be12-f9e5bae31c2d") -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() @patch.object(Crew, "_interpolate_inputs") def test_replay_interpolates_inputs_properly(mock_interpolate_inputs): agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal") @@ -3370,7 +3370,7 @@ def test_replay_interpolates_inputs_properly(mock_interpolate_inputs): assert mock_interpolate_inputs.call_count == 2 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_replay_setup_context(): agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal") task1 = Task(description="Context Task", expected_output="Say {name}", agent=agent) @@ -3523,7 +3523,7 @@ def test_conditional_task_requirement_breaks_when_singular_conditional_task( ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_conditional_task_last_task_when_conditional_is_true(researcher, writer): def condition_fn(output) -> bool: return True @@ -3550,7 +3550,7 @@ def test_conditional_task_last_task_when_conditional_is_true(researcher, writer) ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_conditional_task_last_task_when_conditional_is_false(researcher, writer): def condition_fn(output) -> bool: return False @@ -3599,7 +3599,7 @@ def test_conditional_task_requirement_breaks_when_task_async(researcher, writer) ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_conditional_should_skip(researcher, writer): task1 = Task(description="Return hello", expected_output="say hi", agent=researcher) @@ -3631,7 +3631,7 @@ def test_conditional_should_skip(researcher, writer): assert result.raw.startswith("Task 1 output") -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_conditional_should_execute(researcher, writer): task1 = Task(description="Return hello", expected_output="say hi", agent=researcher) @@ -3662,7 +3662,7 @@ def test_conditional_should_execute(researcher, writer): assert mock_execute_sync.call_count == 2 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_testing_function(researcher): task = Task( description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", @@ -3706,7 +3706,7 @@ def test_crew_testing_function(researcher): assert isinstance(received_events[1], CrewTestCompletedEvent) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_hierarchical_verbose_manager_agent(researcher, writer): task = Task( description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", @@ -3727,7 +3727,7 @@ def test_hierarchical_verbose_manager_agent(researcher, writer): assert crew.manager_agent.verbose -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_hierarchical_verbose_false_manager_agent(researcher, writer): task = Task( description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", @@ -3771,7 +3771,7 @@ def test_fetch_inputs(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_tools_preserve_code_execution_tools(): """ Test that task tools don't override code execution tools when allow_code_execution=True @@ -3853,7 +3853,7 @@ def test_task_tools_preserve_code_execution_tools(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_multimodal_flag_adds_multimodal_tools(): """ Test that an agent with multimodal=True automatically has multimodal tools added to the task execution. @@ -3900,7 +3900,7 @@ def test_multimodal_flag_adds_multimodal_tools(): assert len(used_tools) == 1, "Should only have the AddImageTool" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_multimodal_agent_image_tool_handling(): """ Test that multimodal agents properly handle image tools in the CrewAgentExecutor @@ -3974,7 +3974,7 @@ def test_multimodal_agent_image_tool_handling(): assert result["content"][1]["type"] == "image_url" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_multimodal_agent_describing_image_successfully(): """ Test that a multimodal agent can process images without validation errors. @@ -4012,7 +4012,7 @@ def test_multimodal_agent_describing_image_successfully(): assert task_output.raw == result.raw -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_multimodal_agent_live_image_analysis(): """ Test that multimodal agents can analyze images through a real API call @@ -4055,7 +4055,7 @@ def test_multimodal_agent_live_image_analysis(): assert "error" not in result.raw.lower() # No error messages in response -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_with_failing_task_guardrails(): """Test that crew properly handles failing guardrails and retries with validation feedback.""" @@ -4112,7 +4112,7 @@ def test_crew_with_failing_task_guardrails(): assert task_output.raw == result.raw -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_guardrail_feedback_in_context(): """Test that guardrail feedback is properly appended to task context for retries.""" @@ -4169,7 +4169,7 @@ def test_crew_guardrail_feedback_in_context(): assert task.retry_count == 1, "Task should have been retried once" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_before_kickoff_callback(): @CrewBase class TestCrewClass: @@ -4226,7 +4226,7 @@ def test_before_kickoff_callback(): assert inputs.get("modified") -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_before_kickoff_without_inputs(): @CrewBase class TestCrewClass: @@ -4282,7 +4282,7 @@ def test_before_kickoff_without_inputs(): assert test_crew_instance.received_inputs.get("modified") is True -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_with_knowledge_sources_works_with_copy(researcher, writer): content = "Brandon's favorite color is red and he likes Mexican food." string_source = StringKnowledgeSource(content=content) @@ -4433,7 +4433,7 @@ def test_sets_parent_flow_when_outside_flow(researcher, writer): assert crew.parent_flow is None -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_sets_parent_flow_when_inside_flow(researcher, writer): class MyFlow(Flow): @start() @@ -4639,7 +4639,7 @@ def test_default_crew_name(researcher, writer): assert crew.name == "crew" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_ensure_exchanged_messages_are_propagated_to_external_memory(): external_memory = ExternalMemory(storage=MagicMock()) diff --git a/lib/crewai/tests/test_custom_llm.py b/lib/crewai/tests/test_custom_llm.py index fef1bb5b5..25ec1151e 100644 --- a/lib/crewai/tests/test_custom_llm.py +++ b/lib/crewai/tests/test_custom_llm.py @@ -78,7 +78,7 @@ class CustomLLM(BaseLLM): return 4096 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_custom_llm_implementation(): """Test that a custom LLM implementation works with create_llm.""" custom_llm = CustomLLM(response="The answer is 42") @@ -97,7 +97,7 @@ def test_custom_llm_implementation(): assert "42" in response -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_custom_llm_within_crew(): """Test that a custom LLM implementation works with create_llm.""" custom_llm = CustomLLM(response="Hello! Nice to meet you!", model="test-model") diff --git a/lib/crewai/tests/test_llm.py b/lib/crewai/tests/test_llm.py index 3555ee8c5..60c5b83b5 100644 --- a/lib/crewai/tests/test_llm.py +++ b/lib/crewai/tests/test_llm.py @@ -18,7 +18,7 @@ import pytest # TODO: This test fails without print statement, which makes me think that something is happening asynchronously that we need to eventually fix and dive deeper into at a later date -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_callback_replacement(): llm1 = LLM(model="gpt-4o-mini", is_litellm=True) llm2 = LLM(model="gpt-4o-mini", is_litellm=True) @@ -45,7 +45,7 @@ def test_llm_callback_replacement(): assert usage_metrics_1 == calc_handler_1.token_cost_process.get_summary() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call_with_string_input(): llm = LLM(model="gpt-4o-mini") @@ -55,7 +55,7 @@ def test_llm_call_with_string_input(): assert len(result.strip()) > 0 # Ensure the response is not empty -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call_with_string_input_and_callbacks(): llm = LLM(model="gpt-4o-mini", is_litellm=True) calc_handler = TokenCalcHandler(token_cost_process=TokenProcess()) @@ -72,7 +72,7 @@ def test_llm_call_with_string_input_and_callbacks(): assert usage_metrics.successful_requests == 1 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call_with_message_list(): llm = LLM(model="gpt-4o-mini") messages = [{"role": "user", "content": "What is the capital of France?"}] @@ -83,7 +83,7 @@ def test_llm_call_with_message_list(): assert "Paris" in result -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call_with_tool_and_string_input(): llm = LLM(model="gpt-4o-mini") @@ -121,7 +121,7 @@ def test_llm_call_with_tool_and_string_input(): assert result == get_current_year() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call_with_tool_and_message_list(): llm = LLM(model="gpt-4o-mini", is_litellm=True) @@ -161,7 +161,7 @@ def test_llm_call_with_tool_and_message_list(): assert result == 25 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_passes_additional_params(): llm = LLM( model="gpt-4o-mini", @@ -289,7 +289,7 @@ def test_gemma3(model): assert "Paris" in result -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() @pytest.mark.parametrize( "model", ["gpt-4.1", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano-2025-04-14"] ) @@ -300,7 +300,7 @@ def test_gpt_4_1(model): assert "Paris" in result -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_o3_mini_reasoning_effort_high(): llm = LLM( model="o3-mini", @@ -311,7 +311,7 @@ def test_o3_mini_reasoning_effort_high(): assert "Paris" in result -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_o3_mini_reasoning_effort_low(): llm = LLM( model="o3-mini", @@ -322,7 +322,7 @@ def test_o3_mini_reasoning_effort_low(): assert "Paris" in result -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_o3_mini_reasoning_effort_medium(): llm = LLM( model="o3-mini", @@ -411,7 +411,7 @@ def test_context_window_exceeded_error_handling(): assert "8192 tokens" in str(excinfo.value) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() @pytest.fixture def anthropic_llm(): """Fixture providing an Anthropic LLM instance.""" @@ -551,7 +551,7 @@ def mock_emit() -> MagicMock: yield mock_emit -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_handle_streaming_tool_calls(get_weather_tool_schema, mock_emit): llm = LLM(model="openai/gpt-4o", stream=True, is_litellm=True) response = llm.call( @@ -579,7 +579,7 @@ def test_handle_streaming_tool_calls(get_weather_tool_schema, mock_emit): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_handle_streaming_tool_calls_with_error(get_weather_tool_schema, mock_emit): def get_weather_error(location): raise Exception("Error") @@ -604,7 +604,7 @@ def test_handle_streaming_tool_calls_with_error(get_weather_tool_schema, mock_em ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_handle_streaming_tool_calls_no_available_functions( get_weather_tool_schema, mock_emit ): @@ -625,7 +625,7 @@ def test_handle_streaming_tool_calls_no_available_functions( ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_handle_streaming_tool_calls_no_tools(mock_emit): llm = LLM(model="openai/gpt-4o", stream=True, is_litellm=True) response = llm.call( @@ -646,7 +646,7 @@ def test_handle_streaming_tool_calls_no_tools(mock_emit): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call_when_stop_is_unsupported(caplog): llm = LLM(model="o1-mini", stop=["stop"], is_litellm=True) with caplog.at_level(logging.INFO): @@ -656,7 +656,7 @@ def test_llm_call_when_stop_is_unsupported(caplog): assert "Paris" in result -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_call_when_stop_is_unsupported_when_additional_drop_params_is_provided( caplog, ): diff --git a/lib/crewai/tests/test_project.py b/lib/crewai/tests/test_project.py index 5106aae6e..784d97cc0 100644 --- a/lib/crewai/tests/test_project.py +++ b/lib/crewai/tests/test_project.py @@ -171,7 +171,7 @@ def test_task_guardrail(): assert reporting_task.guardrail is None -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_before_kickoff_modification(): crew = InternalCrew() inputs = {"topic": "LLMs"} @@ -179,7 +179,7 @@ def test_before_kickoff_modification(): assert "bicycles" in result.raw, "Before kickoff function did not modify inputs" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_after_kickoff_modification(): crew = InternalCrew() # Assuming the crew execution returns a dict @@ -190,14 +190,14 @@ def test_after_kickoff_modification(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_before_kickoff_with_none_input(): crew = InternalCrew() crew.crew().kickoff(None) # Test should pass without raising exceptions -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_multiple_before_after_kickoff(): @CrewBase class MultipleHooksCrew: diff --git a/lib/crewai/tests/test_task.py b/lib/crewai/tests/test_task.py index 73fedfc88..4c216a05e 100644 --- a/lib/crewai/tests/test_task.py +++ b/lib/crewai/tests/test_task.py @@ -288,7 +288,7 @@ def test_guardrail_type_error(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_output_pydantic_sequential(): class ScoreOutput(BaseModel): score: int @@ -313,7 +313,7 @@ def test_output_pydantic_sequential(): assert result.to_dict() == {"score": 4} -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_output_pydantic_hierarchical(): class ScoreOutput(BaseModel): score: int @@ -343,7 +343,7 @@ def test_output_pydantic_hierarchical(): assert result.to_dict() == {"score": 4} -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_output_json_sequential(): import uuid @@ -375,7 +375,7 @@ def test_output_json_sequential(): os.remove(output_file) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_output_json_hierarchical(): class ScoreOutput(BaseModel): score: int @@ -401,11 +401,11 @@ def test_output_json_hierarchical(): manager_llm="gpt-4o", ) result = crew.kickoff() - assert result.json == '{"score": 4}' - assert result.to_dict() == {"score": 4} + assert result.json == '{"score": 5}' + assert result.to_dict() == {"score": 5} -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_inject_date(): reporter = Agent( role="Reporter", @@ -430,7 +430,7 @@ def test_inject_date(): assert "2025-05-21" in result.raw -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_inject_date_custom_format(): reporter = Agent( role="Reporter", @@ -456,7 +456,7 @@ def test_inject_date_custom_format(): assert "May 21, 2025" in result.raw -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_no_inject_date(): reporter = Agent( role="Reporter", @@ -481,7 +481,7 @@ def test_no_inject_date(): assert "2025-05-21" not in result.raw -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_json_property_without_output_json(): class ScoreOutput(BaseModel): score: int @@ -509,7 +509,7 @@ def test_json_property_without_output_json(): assert "No JSON output found in the final task." in str(excinfo.value) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_output_json_dict_sequential(): class ScoreOutput(BaseModel): score: int @@ -534,7 +534,7 @@ def test_output_json_dict_sequential(): assert result.to_dict() == {"score": 4} -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_output_json_dict_hierarchical(): class ScoreOutput(BaseModel): score: int @@ -564,7 +564,7 @@ def test_output_json_dict_hierarchical(): assert result.to_dict() == {"score": 4} -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_output_pydantic_to_another_task(): class ScoreOutput(BaseModel): score: int @@ -602,7 +602,7 @@ def test_output_pydantic_to_another_task(): assert pydantic_result.score == 5 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_output_json_to_another_task(): class ScoreOutput(BaseModel): score: int @@ -633,7 +633,7 @@ def test_output_json_to_another_task(): assert '{"score": 3}' == result.json -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_save_task_output(): scorer = Agent( role="Scorer", @@ -657,7 +657,7 @@ def test_save_task_output(): save_file.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_save_task_json_output(): from unittest.mock import patch @@ -695,7 +695,7 @@ def test_save_task_json_output(): assert "score" in data -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_save_task_pydantic_output(): import uuid @@ -728,7 +728,7 @@ def test_save_task_pydantic_output(): os.remove(output_file) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_custom_converter_cls(): class ScoreOutput(BaseModel): score: int @@ -760,7 +760,7 @@ def test_custom_converter_cls(): mock_to_pydantic.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_increment_delegations_for_hierarchical_process(): scorer = Agent( role="Scorer", @@ -787,7 +787,7 @@ def test_increment_delegations_for_hierarchical_process(): increment_delegations.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_increment_delegations_for_sequential_process(): manager = Agent( role="Manager", @@ -821,7 +821,7 @@ def test_increment_delegations_for_sequential_process(): increment_delegations.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_increment_tool_errors(): from crewai.tools import tool @@ -1275,7 +1275,7 @@ def test_github_issue_3149_reproduction(): assert task.output_file == "test_output.txt" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_execution_times(): researcher = Agent( role="Researcher", @@ -1545,7 +1545,7 @@ def test_task_with_no_max_execution_time(): execute.assert_called_once() -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_with_max_execution_time(): from crewai.tools import tool @@ -1579,7 +1579,7 @@ def test_task_with_max_execution_time(): assert result.raw == "okay" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_with_max_execution_time_exceeded(): from crewai.tools import tool @@ -1613,7 +1613,7 @@ def test_task_with_max_execution_time_exceeded(): task.execute_sync(agent=researcher) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_interpolation_with_hyphens(): agent = Agent( role="Researcher", diff --git a/lib/crewai/tests/test_task_guardrails.py b/lib/crewai/tests/test_task_guardrails.py index 22572bfd3..c969015a1 100644 --- a/lib/crewai/tests/test_task_guardrails.py +++ b/lib/crewai/tests/test_task_guardrails.py @@ -172,7 +172,7 @@ def task_output(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_guardrail_process_output(task_output): guardrail = LLMGuardrail( description="Ensure the result has less than 10 words", llm=LLM(model="gpt-4o") @@ -192,7 +192,7 @@ def test_task_guardrail_process_output(task_output): assert result[1] == task_output.raw -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_guardrail_emits_events(sample_agent): started_guardrail = [] completed_guardrail = [] @@ -267,7 +267,7 @@ def test_guardrail_emits_events(sample_agent): assert completed_guardrail == expected_completed_events -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_guardrail_when_an_error_occurs(sample_agent, task_output): with ( patch( diff --git a/lib/crewai/tests/tools/agent_tools/test_agent_tools.py b/lib/crewai/tests/tools/agent_tools/test_agent_tools.py index 89d2798d6..c828255b1 100644 --- a/lib/crewai/tests/tools/agent_tools/test_agent_tools.py +++ b/lib/crewai/tests/tools/agent_tools/test_agent_tools.py @@ -23,7 +23,7 @@ def vcr_config(request) -> dict: } -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_delegate_work(): result = delegate_tool.run( coworker="researcher", @@ -37,7 +37,7 @@ def test_delegate_work(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_delegate_work_with_wrong_co_worker_variable(): result = delegate_tool.run( coworker="researcher", @@ -51,7 +51,7 @@ def test_delegate_work_with_wrong_co_worker_variable(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_ask_question(): result = ask_tool.run( coworker="researcher", @@ -65,7 +65,7 @@ def test_ask_question(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_ask_question_with_wrong_co_worker_variable(): result = ask_tool.run( coworker="researcher", @@ -79,7 +79,7 @@ def test_ask_question_with_wrong_co_worker_variable(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_delegate_work_withwith_coworker_as_array(): result = delegate_tool.run( coworker="[researcher]", @@ -93,7 +93,7 @@ def test_delegate_work_withwith_coworker_as_array(): ) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_ask_question_with_coworker_as_array(): result = ask_tool.run( coworker="[researcher]", diff --git a/lib/crewai/tests/tools/test_base_tool.py b/lib/crewai/tests/tools/test_base_tool.py index 2aa9ac8bf..c23f3b876 100644 --- a/lib/crewai/tests/tools/test_base_tool.py +++ b/lib/crewai/tests/tools/test_base_tool.py @@ -197,7 +197,7 @@ def test_run_does_not_call_asyncio_run_for_sync_tools(): assert sync_result == "Processed test synchronously" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_max_usage_count_is_respected(): class IteratingTool(BaseTool): name: str = "iterating_tool" diff --git a/lib/crewai/tests/tools/test_structured_tool.py b/lib/crewai/tests/tools/test_structured_tool.py index 15e034f3e..999c13072 100644 --- a/lib/crewai/tests/tools/test_structured_tool.py +++ b/lib/crewai/tests/tools/test_structured_tool.py @@ -197,7 +197,7 @@ def build_simple_crew(tool): return Crew(agents=[agent1], tasks=[say_hi_task]) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_async_tool_using_within_isolated_crew(custom_tool): crew = build_simple_crew(custom_tool) result = crew.kickoff() @@ -205,7 +205,7 @@ def test_async_tool_using_within_isolated_crew(custom_tool): assert result.raw == "Hello World from Custom Tool" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_async_tool_using_decorator_within_isolated_crew(custom_tool_decorator): crew = build_simple_crew(custom_tool_decorator) result = crew.kickoff() @@ -213,7 +213,7 @@ def test_async_tool_using_decorator_within_isolated_crew(custom_tool_decorator): assert result.raw == "Hello World from Custom Tool" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_async_tool_within_flow(custom_tool): from crewai.flow.flow import Flow @@ -230,7 +230,7 @@ def test_async_tool_within_flow(custom_tool): assert result.raw == "Hello World from Custom Tool" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_async_tool_using_decorator_within_flow(custom_tool_decorator): from crewai.flow.flow import Flow diff --git a/lib/crewai/tests/tracing/test_tracing.py b/lib/crewai/tests/tracing/test_tracing.py index 644c7a4b4..111c75b32 100644 --- a/lib/crewai/tests/tracing/test_tracing.py +++ b/lib/crewai/tests/tracing/test_tracing.py @@ -115,7 +115,7 @@ class TestTraceListenerSetup: "mark_trace_batch_as_failed": mock_mark_failed, } - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_trace_listener_collects_crew_events(self): """Test that trace listener properly collects events from crew execution""" @@ -144,7 +144,7 @@ class TestTraceListenerSetup: assert trace_listener.batch_manager.is_batch_initialized() assert trace_listener.batch_manager.current_batch is not None - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_batch_manager_finalizes_batch_clears_buffer(self): """Test that batch manager properly finalizes batch and clears buffer""" @@ -202,7 +202,7 @@ class TestTraceListenerSetup: assert finalize_mock.call_count >= 1 - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_events_collection_batch_manager(self, mock_plus_api_calls): """Test that trace listener properly collects events from crew execution""" @@ -255,7 +255,7 @@ class TestTraceListenerSetup: assert hasattr(event, "event_data") assert hasattr(event, "type") - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_trace_listener_disabled_when_env_false(self): """Test that trace listener doesn't make HTTP calls when tracing is disabled""" @@ -334,7 +334,7 @@ class TestTraceListenerSetup: FlowExample() assert mock_listener_setup.call_count >= 1 - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_trace_listener_ephemeral_batch(self): """Test that trace listener properly handles ephemeral batches""" with ( @@ -368,7 +368,7 @@ class TestTraceListenerSetup: "Batch should have been initialized for unauthenticated user" ) - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_trace_listener_with_authenticated_user(self): """Test that trace listener properly handles authenticated batches""" with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}): @@ -428,7 +428,7 @@ class TestTraceListenerSetup: if hasattr(EventListener, "_instance"): EventListener._instance = None - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_first_time_user_trace_collection_with_timeout(self, mock_plus_api_calls): """Test first-time user trace collection logic with timeout behavior""" @@ -489,7 +489,7 @@ class TestTraceListenerSetup: mock_mark_completed.assert_called_once() - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_first_time_user_trace_collection_user_accepts(self, mock_plus_api_calls): """Test first-time user trace collection when user accepts viewing traces""" @@ -563,7 +563,7 @@ class TestTraceListenerSetup: mock_mark_completed.assert_called_once() - @pytest.mark.vcr(filter_headers=["authorization"]) + @pytest.mark.vcr() def test_first_time_user_trace_consolidation_logic(self, mock_plus_api_calls): """Test the consolidation logic for first-time users vs regular tracing""" with ( diff --git a/lib/crewai/tests/utilities/test_converter.py b/lib/crewai/tests/utilities/test_converter.py index 4fa6d2c2b..3c21c6ac7 100644 --- a/lib/crewai/tests/utilities/test_converter.py +++ b/lib/crewai/tests/utilities/test_converter.py @@ -352,7 +352,7 @@ def test_generate_model_description_dict_field() -> None: assert description["json_schema"]["strict"] is True -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_convert_with_instructions() -> None: llm = LLM(model="gpt-4o-mini") sample_text = "Name: Alice, Age: 30" @@ -374,7 +374,7 @@ def test_convert_with_instructions() -> None: assert output.age == 30 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_converter_with_llama3_2_model() -> None: llm = LLM(model="openrouter/meta-llama/llama-3.2-3b-instruct") sample_text = "Name: Alice Llama, Age: 30" @@ -410,7 +410,7 @@ def test_converter_with_llama3_1_model() -> None: assert output.age == 30 -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_converter_with_nested_model() -> None: llm = LLM(model="gpt-4o-mini") sample_text = "Name: John Doe\nAge: 30\nAddress: 123 Main St, Anytown, 12345" diff --git a/lib/crewai/tests/utilities/test_events.py b/lib/crewai/tests/utilities/test_events.py index 1eeba199a..7d8deb6b3 100644 --- a/lib/crewai/tests/utilities/test_events.py +++ b/lib/crewai/tests/utilities/test_events.py @@ -98,7 +98,7 @@ def reset_event_listener_singleton(): EventListener._instance._initialized = original_initialized -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_emits_start_kickoff_event( base_agent, base_task, reset_event_listener_singleton ): @@ -132,7 +132,7 @@ def test_crew_emits_start_kickoff_event( assert received_events[0].type == "crew_kickoff_started" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_emits_end_kickoff_event(base_agent, base_task): received_events = [] event_received = threading.Event() @@ -155,7 +155,7 @@ def test_crew_emits_end_kickoff_event(base_agent, base_task): assert received_events[0].type == "crew_kickoff_completed" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_emits_test_kickoff_type_event(base_agent, base_task): received_events = [] @@ -188,7 +188,7 @@ def test_crew_emits_test_kickoff_type_event(base_agent, base_task): assert received_events[2].type == "crew_test_completed" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_emits_kickoff_failed_event(base_agent, base_task): received_events = [] event_received = threading.Event() @@ -214,7 +214,7 @@ def test_crew_emits_kickoff_failed_event(base_agent, base_task): assert received_events[0].type == "crew_kickoff_failed" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_emits_start_task_event(base_agent, base_task): received_events = [] event_received = threading.Event() @@ -234,7 +234,7 @@ def test_crew_emits_start_task_event(base_agent, base_task): assert received_events[0].type == "task_started" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_crew_emits_end_task_event( base_agent, base_task, reset_event_listener_singleton ): @@ -268,7 +268,7 @@ def test_crew_emits_end_task_event( assert received_events[0].type == "task_completed" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_task_emits_failed_event_on_execution_error(base_agent, base_task): received_events = [] received_sources = [] @@ -310,7 +310,7 @@ def test_task_emits_failed_event_on_execution_error(base_agent, base_task): assert received_events[0].type == "task_failed" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_emits_execution_started_and_completed_events(base_agent, base_task): received_events = [] lock = threading.Lock() @@ -349,7 +349,7 @@ def test_agent_emits_execution_started_and_completed_events(base_agent, base_tas assert received_events[1].type == "agent_execution_completed" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_agent_emits_execution_error_event(base_agent, base_task): received_events = [] event_received = threading.Event() @@ -392,7 +392,7 @@ class SayHiTool(BaseTool): return "hi" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_tools_emits_finished_events(): received_events = [] event_received = threading.Event() @@ -429,7 +429,7 @@ def test_tools_emits_finished_events(): assert isinstance(received_events[0].timestamp, datetime) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_tools_emits_error_events(): received_events = [] lock = threading.Lock() @@ -605,7 +605,7 @@ def test_flow_emits_method_execution_started_event(): assert event.type == "method_execution_started" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_register_handler_adds_new_handler(base_agent, base_task): received_events = [] event_received = threading.Event() @@ -625,7 +625,7 @@ def test_register_handler_adds_new_handler(base_agent, base_task): assert received_events[0].type == "crew_kickoff_started" -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_multiple_handlers_for_same_event(base_agent, base_task): received_events_1 = [] received_events_2 = [] @@ -703,7 +703,7 @@ def test_flow_emits_method_execution_failed_event(): assert received_events[0].error == error -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_emits_call_started_event(): received_events = [] @@ -729,7 +729,7 @@ def test_llm_emits_call_started_event(): assert received_events[0].task_id is None -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() @pytest.mark.isolated def test_llm_emits_call_failed_event(): received_events = [] @@ -762,7 +762,7 @@ def test_llm_emits_call_failed_event(): assert received_events[0].task_id is None -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_emits_stream_chunk_events(): """Test that LLM emits stream chunk events when streaming is enabled.""" received_chunks = [] @@ -790,7 +790,7 @@ def test_llm_emits_stream_chunk_events(): assert "".join(received_chunks) == response -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_no_stream_chunks_when_streaming_disabled(): """Test that LLM doesn't emit stream chunk events when streaming is disabled.""" received_chunks = [] @@ -812,7 +812,7 @@ def test_llm_no_stream_chunks_when_streaming_disabled(): assert response and isinstance(response, str) -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_streaming_fallback_to_non_streaming(): """Test that streaming falls back to non-streaming when there's an error.""" received_chunks = [] @@ -870,7 +870,7 @@ def test_streaming_fallback_to_non_streaming(): llm.call = original_call -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_streaming_empty_response_handling(): """Test that streaming handles empty responses correctly.""" received_chunks = [] @@ -918,7 +918,7 @@ def test_streaming_empty_response_handling(): llm.call = original_call -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_stream_llm_emits_event_with_task_and_agent_info(): completed_event = [] failed_event = [] @@ -990,7 +990,7 @@ def test_stream_llm_emits_event_with_task_and_agent_info(): assert set(all_task_name) == {task.name or task.description} -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_emits_event_with_task_and_agent_info(base_agent, base_task): completed_event = [] failed_event = [] @@ -1043,7 +1043,7 @@ def test_llm_emits_event_with_task_and_agent_info(base_agent, base_task): assert set(all_task_name) == {base_task.name or base_task.description} -@pytest.mark.vcr(filter_headers=["authorization"]) +@pytest.mark.vcr() def test_llm_emits_event_with_lite_agent(): completed_event = [] failed_event = []