mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-06 22:58:30 +00:00)
* WIP crew events emitter
* Refactor event handling and introduce new event types
  - Migrate from global `emit` function to `event_bus.emit`
  - Add new event types for task failures, tool usage, and agent execution
  - Update event listeners and event bus to support more granular event tracking
  - Remove deprecated event emission methods
  - Improve event type consistency and add more detailed event information
* Add event emission for agent execution lifecycle
  - Emit AgentExecutionStarted and AgentExecutionError events
  - Update CrewAgentExecutor to use event_bus for tracking agent execution
  - Refactor error handling to include event emission
  - Minor code formatting improvements in task.py and crew_agent_executor.py
  - Fix a typo in test file
* Refactor event system and add third-party event listeners (see the listener sketch after this changelog)
  - Move event_bus import to correct module paths
  - Introduce BaseEventListener abstract base class
  - Add AgentOpsListener for third-party event tracking
  - Update event listener initialization and setup
  - Clean up event-related imports and exports
* Enhance event system type safety and error handling
  - Improve type annotations for event bus and event types
  - Add null checks for agent and task in event emissions
  - Update import paths for base tool and base agent
  - Refactor event listener type hints
  - Remove unnecessary print statements
  - Update test configurations to match new event handling
* Refactor event classes to improve type safety and naming consistency
  - Rename event classes to have explicit 'Event' suffix (e.g., TaskStartedEvent)
  - Update import statements and references across multiple files
  - Remove deprecated events.py module
  - Enhance event type hints and configurations
  - Clean up unnecessary event-related code
* Add default model for CrewEvaluator and fix event import order
  - Set default model to "gpt-4o-mini" in CrewEvaluator when no model is specified
  - Reorder event-related imports in task.py to follow standard import conventions
  - Update event bus initialization method return type hint
  - Export event_bus in events/__init__.py
* Fix tool usage and event import handling
  - Update tool usage to use `.get()` method when checking tool name
  - Remove unnecessary `__all__` export list in events/__init__.py
* Refactor Flow and Agent event handling to use event_bus
  - Remove `event_emitter` from Flow class and replace with `event_bus.emit()`
  - Update Flow and Agent tests to use event_bus event listeners
  - Remove redundant event emissions in Flow methods
  - Add debug print statements in Flow execution
  - Simplify event tracking in test cases
* Enhance event handling for Crew, Task, and Event classes
  - Add crew name to failed event types (CrewKickoffFailedEvent, CrewTrainFailedEvent, CrewTestFailedEvent)
  - Update Task events to remove redundant task and context attributes
  - Refactor EventListener to use Logger for consistent event logging
  - Add new event types for Crew train and test events
  - Improve event bus event tracking in test cases
* Remove telemetry and tracing dependencies from Task and Flow classes
  - Remove telemetry-related imports and private attributes from Task class
  - Remove `_telemetry` attribute from Flow class
  - Update event handling to emit events without direct telemetry tracking
  - Simplify task and flow execution by removing explicit telemetry spans
  - Move telemetry-related event handling to EventListener
* Clean up unused imports and event-related code
  - Remove unused imports from various event and flow-related files
  - Reorder event imports to follow standard conventions
  - Remove unnecessary event type references
  - Simplify import statements in event and flow modules
* Update crew test to validate verbose output and kickoff_for_each method
  - Enhance test_crew_verbose_output to check specific listener log messages
  - Modify test_kickoff_for_each_invalid_input to use Pydantic validation error
  - Improve test coverage for crew logging and input validation
* Update crew test verbose output with improved emoji icons
  - Replace task and agent completion icons from 👍 to ✅
  - Enhance readability of test output logging
  - Maintain consistent test coverage for crew verbose output
* Add MethodExecutionFailedEvent to handle flow method execution failures
  - Introduce new MethodExecutionFailedEvent in flow_events module
  - Update Flow class to catch and emit method execution failures
  - Add event listener for method execution failure events
  - Update event-related imports to include new event type
  - Enhance test coverage for method execution failure handling
* Propagate method execution failures in Flow class
  - Modify Flow class to re-raise exceptions after emitting MethodExecutionFailedEvent
  - Reorder MethodExecutionFailedEvent import to maintain consistent import style
* Enable test coverage for Flow method execution failure event
  - Uncomment pytest.raises() in test_events to verify exception handling
  - Ensure test validates MethodExecutionFailedEvent emission during flow kickoff
* Add event handling for tool usage events
  - Introduce event listeners for ToolUsageFinishedEvent and ToolUsageErrorEvent
  - Log tool usage events with descriptive emoji icons (✅ and ❌)
  - Update event_listener to track and log tool usage lifecycle
* Reorder and clean up event imports in event_listener
  - Reorganize imports for tool usage events and other event types
  - Maintain consistent import ordering and remove unused imports
  - Ensure clean and organized import structure in event_listener module
* moving to dedicated eventlistener
* dont forget crew level
* Refactor AgentOps event listener for crew-level tracking
  - Modify AgentOpsListener to handle crew-level events
  - Initialize and end AgentOps session at crew kickoff and completion
  - Create agents for each crew member during session initialization
  - Improve session management and event recording
  - Clean up and simplify event handling logic
* Update test_events to validate tool usage error event handling
  - Modify test to assert single error event with correct attributes
  - Use pytest.raises() to verify error event generation
  - Simplify error event validation in test case
* Improve AgentOps listener type hints and formatting
  - Add string type hints for AgentOps classes to resolve potential import issues
  - Clean up unnecessary whitespace and improve code indentation
  - Simplify initialization and event handling logic
* Update test_events to validate multiple tool usage events
  - Modify test to assert 75 events instead of a single error event
  - Remove pytest.raises() check, allowing crew kickoff to complete
  - Adjust event validation to support broader event tracking
* Rename event_bus to crewai_event_bus for improved clarity and specificity
  - Replace all references to `event_bus` with `crewai_event_bus`
  - Update import statements across multiple files
  - Remove the old `event_bus.py` file
  - Maintain existing event handling functionality
* Enhance EventListener with singleton pattern and color configuration (see the singleton sketch after this changelog)
  - Implement singleton pattern for EventListener to ensure single instance
  - Add default color configuration using EMITTER_COLOR from constants
  - Modify log method calls to use default color and remove redundant color parameters
  - Improve initialization logic to prevent multiple initializations
* Add FlowPlotEvent and update event bus to support flow plotting
  - Introduce FlowPlotEvent to track flow plotting events
  - Replace Telemetry method with event bus emission in Flow.plot()
  - Update event bus to support new FlowPlotEvent type
  - Add test case to validate flow plotting event emission
* Remove RunType enum and clean up crew events module
  - Delete unused RunType enum from crew_events.py
  - Simplify crew_events.py by removing unnecessary enum definition
  - Improve code clarity by removing unneeded imports
* Enhance event handling for tool usage and agent execution
  - Add new events for tool usage: ToolSelectionErrorEvent, ToolValidateInputErrorEvent
  - Improve error tracking and event emission in ToolUsage and LLM classes
  - Update AgentExecutionStartedEvent to use task_prompt instead of inputs
  - Add comprehensive test coverage for new event types and error scenarios
* Refactor event system and improve crew testing
  - Extract base CrewEvent class to a new base_events.py module
  - Update event imports across multiple event-related files
  - Modify CrewTestStartedEvent to use eval_llm instead of openai_model_name
  - Add LLM creation validation in crew testing method
  - Improve type handling and event consistency
* Refactor task events to use base CrewEvent
  - Move CrewEvent import from crew_events to base_events
  - Remove unnecessary blank lines in task_events.py
  - Simplify event class structure for task-related events
* Update AgentExecutionStartedEvent to use task_prompt
  - Modify test_events.py to use task_prompt instead of inputs
  - Simplify event input validation in test case
  - Align with recent event system refactoring
* Improve type hinting for TaskCompletedEvent handler
  - Add explicit type annotation for TaskCompletedEvent in event_listener.py
  - Enhance type safety for event handling in EventListener
* Improve test_validate_tool_input_invalid_input with mock objects
  - Add explicit mock objects for agent and action in test case
  - Ensure proper string values for mock agent and action attributes
  - Simplify test setup for ToolUsage validation method
* Remove ToolUsageStartedEvent emission in tool usage process
  - Remove unnecessary event emission for tool usage start
  - Simplify tool usage event handling
  - Eliminate redundant event data preparation step
* refactor: clean up and organize imports in llm and flow modules
* test: Improve flow persistence test cases and logging
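The changelog describes the new event API only in prose, so here is a minimal sketch of a custom listener to make the moving parts concrete. The @crewai_event_bus.on(...) decorator and the (source, event) handler signature match the usage in test_tool_execution_error_event in the file below; the BaseEventListener import path and its setup_listeners hook are assumptions based on the changelog, not something this file confirms.

# Sketch only: a custom listener for the event bus described in the changelog.
# Assumptions (not confirmed by this file): the BaseEventListener import path
# and the setup_listeners(...) hook name.
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.base_event_listener import BaseEventListener  # assumed path
from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent


class ToolFailureLogger(BaseEventListener):
    """Hypothetical listener that logs failed tool executions."""

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(ToolExecutionErrorEvent)
        def on_tool_error(source, event):
            # tool_name and error are the attributes asserted on in the test below.
            print(f"❌ tool '{event.tool_name}' failed: {event.error}")

Under these assumptions, constructing the listener once is what wires its handlers to the bus, so every later ToolExecutionErrorEvent emission reaches on_tool_error.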
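The changelog also notes that EventListener was made a singleton with a default EMITTER_COLOR and a guard against repeated initialization. The snippet below is a generic sketch of that pattern in Python, not the repository's actual implementation; the class name, the default color value, and the _initialized flag are illustrative stand-ins.

# Generic sketch of the singleton-with-init-guard pattern the changelog mentions.
# Names and the default color value are illustrative, not the repo's actual code.
class EventListener:
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Always hand back the one shared instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self, default_color: str = "bold_blue"):
        # Skip re-initialization when the shared instance is constructed again.
        if self._initialized:
            return
        self.default_color = default_color
        self._initialized = True

With this shape, EventListener() always returns the same object, which is why the changelog also adds logic to prevent multiple initializations.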
428 lines · 14 KiB · Python
import os
from time import sleep
from unittest.mock import MagicMock, patch

import pytest
from pydantic import BaseModel

from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.llm import LLM
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent
from crewai.utilities.token_counter_callback import TokenCalcHandler


# TODO: This test fails without the print statement, which suggests something is happening asynchronously that we need to dive deeper into and fix at a later date
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_callback_replacement():
    llm1 = LLM(model="gpt-4o-mini")
    llm2 = LLM(model="gpt-4o-mini")

    calc_handler_1 = TokenCalcHandler(token_cost_process=TokenProcess())
    calc_handler_2 = TokenCalcHandler(token_cost_process=TokenProcess())

    result1 = llm1.call(
        messages=[{"role": "user", "content": "Hello, world!"}],
        callbacks=[calc_handler_1],
    )
    print("result1:", result1)
    usage_metrics_1 = calc_handler_1.token_cost_process.get_summary()
    print("usage_metrics_1:", usage_metrics_1)

    result2 = llm2.call(
        messages=[{"role": "user", "content": "Hello, world from another agent!"}],
        callbacks=[calc_handler_2],
    )
    sleep(5)
    print("result2:", result2)
    usage_metrics_2 = calc_handler_2.token_cost_process.get_summary()
    print("usage_metrics_2:", usage_metrics_2)

    # The first handler should not have been updated
    assert usage_metrics_1.successful_requests == 1
    assert usage_metrics_2.successful_requests == 1
    assert usage_metrics_1 == calc_handler_1.token_cost_process.get_summary()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_with_string_input():
    llm = LLM(model="gpt-4o-mini")

    # Test the call method with a string input
    result = llm.call("Return the name of a random city in the world.")
    assert isinstance(result, str)
    assert len(result.strip()) > 0  # Ensure the response is not empty


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_with_string_input_and_callbacks():
    llm = LLM(model="gpt-4o-mini")
    calc_handler = TokenCalcHandler(token_cost_process=TokenProcess())

    # Test the call method with a string input and callbacks
    result = llm.call(
        "Tell me a joke.",
        callbacks=[calc_handler],
    )
    usage_metrics = calc_handler.token_cost_process.get_summary()

    assert isinstance(result, str)
    assert len(result.strip()) > 0
    assert usage_metrics.successful_requests == 1


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_with_message_list():
    llm = LLM(model="gpt-4o-mini")
    messages = [{"role": "user", "content": "What is the capital of France?"}]

    # Test the call method with a list of messages
    result = llm.call(messages)
    assert isinstance(result, str)
    assert "Paris" in result


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_with_tool_and_string_input():
    llm = LLM(model="gpt-4o-mini")

    def get_current_year() -> str:
        """Returns the current year as a string."""
        from datetime import datetime

        return str(datetime.now().year)

    # Create tool schema
    tool_schema = {
        "type": "function",
        "function": {
            "name": "get_current_year",
            "description": "Returns the current year as a string.",
            "parameters": {
                "type": "object",
                "properties": {},
                "required": [],
            },
        },
    }

    # Available functions mapping
    available_functions = {"get_current_year": get_current_year}

    # Test the call method with a string input and tool
    result = llm.call(
        "What is the current year?",
        tools=[tool_schema],
        available_functions=available_functions,
    )

    assert isinstance(result, str)
    assert result == get_current_year()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_with_tool_and_message_list():
    llm = LLM(model="gpt-4o-mini")

    def square_number(number: int) -> int:
        """Returns the square of a number."""
        return number * number

    # Create tool schema
    tool_schema = {
        "type": "function",
        "function": {
            "name": "square_number",
            "description": "Returns the square of a number.",
            "parameters": {
                "type": "object",
                "properties": {
                    "number": {"type": "integer", "description": "The number to square"}
                },
                "required": ["number"],
            },
        },
    }

    # Available functions mapping
    available_functions = {"square_number": square_number}

    messages = [{"role": "user", "content": "What is the square of 5?"}]

    # Test the call method with messages and tool
    result = llm.call(
        messages,
        tools=[tool_schema],
        available_functions=available_functions,
    )

    assert isinstance(result, int)
    assert result == 25


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_passes_additional_params():
    llm = LLM(
        model="gpt-4o-mini",
        vertex_credentials="test_credentials",
        vertex_project="test_project",
    )

    messages = [{"role": "user", "content": "Hello, world!"}]

    with patch("litellm.completion") as mocked_completion:
        # Create mocks for response structure
        mock_message = MagicMock()
        mock_message.content = "Test response"
        mock_choice = MagicMock()
        mock_choice.message = mock_message
        mock_response = MagicMock()
        mock_response.choices = [mock_choice]
        mock_response.usage = {
            "prompt_tokens": 5,
            "completion_tokens": 5,
            "total_tokens": 10,
        }

        # Set up the mocked completion to return the mock response
        mocked_completion.return_value = mock_response

        result = llm.call(messages)

        # Assert that litellm.completion was called once
        mocked_completion.assert_called_once()

        # Retrieve the actual arguments with which litellm.completion was called
        _, kwargs = mocked_completion.call_args

        # Check that the additional_params were passed to litellm.completion
        assert kwargs["vertex_credentials"] == "test_credentials"
        assert kwargs["vertex_project"] == "test_project"

        # Also verify that other expected parameters are present
        assert kwargs["model"] == "gpt-4o-mini"
        assert kwargs["messages"] == messages

        # Check the result from llm.call
        assert result == "Test response"


def test_get_custom_llm_provider_openrouter():
    llm = LLM(model="openrouter/deepseek/deepseek-chat")
    assert llm._get_custom_llm_provider() == "openrouter"


def test_get_custom_llm_provider_gemini():
    llm = LLM(model="gemini/gemini-1.5-pro")
    assert llm._get_custom_llm_provider() == "gemini"


def test_get_custom_llm_provider_openai():
    llm = LLM(model="gpt-4")
    assert llm._get_custom_llm_provider() == "openai"


def test_validate_call_params_supported():
    class DummyResponse(BaseModel):
        a: int

    # Patch supports_response_schema to simulate a supported model.
    with patch("crewai.llm.supports_response_schema", return_value=True):
        llm = LLM(
            model="openrouter/deepseek/deepseek-chat", response_format=DummyResponse
        )
        # Should not raise any error.
        llm._validate_call_params()


def test_validate_call_params_not_supported():
    class DummyResponse(BaseModel):
        a: int

    # Patch supports_response_schema to simulate an unsupported model.
    with patch("crewai.llm.supports_response_schema", return_value=False):
        llm = LLM(model="gemini/gemini-1.5-pro", response_format=DummyResponse)
        with pytest.raises(ValueError) as excinfo:
            llm._validate_call_params()
        assert "does not support response_format" in str(excinfo.value)


def test_validate_call_params_no_response_format():
    # When no response_format is provided, no validation error should occur.
    llm = LLM(model="gemini/gemini-1.5-pro", response_format=None)
    llm._validate_call_params()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_o3_mini_reasoning_effort_high():
    llm = LLM(
        model="o3-mini",
        reasoning_effort="high",
    )
    result = llm.call("What is the capital of France?")
    assert isinstance(result, str)
    assert "Paris" in result


@pytest.mark.vcr(filter_headers=["authorization"])
def test_o3_mini_reasoning_effort_low():
    llm = LLM(
        model="o3-mini",
        reasoning_effort="low",
    )
    result = llm.call("What is the capital of France?")
    assert isinstance(result, str)
    assert "Paris" in result


@pytest.mark.vcr(filter_headers=["authorization"])
def test_o3_mini_reasoning_effort_medium():
    llm = LLM(
        model="o3-mini",
        reasoning_effort="medium",
    )
    result = llm.call("What is the capital of France?")
    assert isinstance(result, str)
    assert "Paris" in result


@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.fixture
def anthropic_llm():
    """Fixture providing an Anthropic LLM instance."""
    return LLM(model="anthropic/claude-3-sonnet")


@pytest.fixture
def system_message():
    """Fixture providing a system message."""
    return {"role": "system", "content": "test"}


@pytest.fixture
def user_message():
    """Fixture providing a user message."""
    return {"role": "user", "content": "test"}


def test_anthropic_message_formatting_edge_cases(anthropic_llm):
    """Test edge cases for Anthropic message formatting."""
    # Test None messages
    with pytest.raises(TypeError, match="Messages cannot be None"):
        anthropic_llm._format_messages_for_provider(None)

    # Test empty message list
    formatted = anthropic_llm._format_messages_for_provider([])
    assert len(formatted) == 1
    assert formatted[0]["role"] == "user"
    assert formatted[0]["content"] == "."

    # Test invalid message format
    with pytest.raises(TypeError, match="Invalid message format"):
        anthropic_llm._format_messages_for_provider([{"invalid": "message"}])


def test_anthropic_model_detection():
    """Test Anthropic model detection with various formats."""
    models = [
        ("anthropic/claude-3", True),
        ("claude-instant", True),
        ("claude/v1", True),
        ("gpt-4", False),
        ("", False),
        ("anthropomorphic", False),  # Should not match partial words
    ]

    for model, expected in models:
        llm = LLM(model=model)
        assert llm.is_anthropic == expected, f"Failed for model: {model}"


def test_anthropic_message_formatting(anthropic_llm, system_message, user_message):
    """Test Anthropic message formatting with fixtures."""
    # Test when first message is system
    formatted = anthropic_llm._format_messages_for_provider([system_message])
    assert len(formatted) == 2
    assert formatted[0]["role"] == "user"
    assert formatted[0]["content"] == "."
    assert formatted[1] == system_message

    # Test when first message is already user
    formatted = anthropic_llm._format_messages_for_provider([user_message])
    assert len(formatted) == 1
    assert formatted[0] == user_message

    # Test with empty message list
    formatted = anthropic_llm._format_messages_for_provider([])
    assert len(formatted) == 1
    assert formatted[0]["role"] == "user"
    assert formatted[0]["content"] == "."

    # Test with non-Anthropic model (should not modify messages)
    non_anthropic_llm = LLM(model="gpt-4")
    formatted = non_anthropic_llm._format_messages_for_provider([system_message])
    assert len(formatted) == 1
    assert formatted[0] == system_message


def test_deepseek_r1_with_open_router():
    if not os.getenv("OPEN_ROUTER_API_KEY"):
        pytest.skip("OPEN_ROUTER_API_KEY not set; skipping test.")

    llm = LLM(
        model="openrouter/deepseek/deepseek-r1",
        base_url="https://openrouter.ai/api/v1",
        api_key=os.getenv("OPEN_ROUTER_API_KEY"),
    )
    result = llm.call("What is the capital of France?")
    assert isinstance(result, str)
    assert "Paris" in result


@pytest.mark.vcr(filter_headers=["authorization"])
def test_tool_execution_error_event():
    llm = LLM(model="gpt-4o-mini")

    def failing_tool(param: str) -> str:
        """This tool always fails."""
        raise Exception("Tool execution failed!")

    tool_schema = {
        "type": "function",
        "function": {
            "name": "failing_tool",
            "description": "This tool always fails.",
            "parameters": {
                "type": "object",
                "properties": {
                    "param": {"type": "string", "description": "A test parameter"}
                },
                "required": ["param"],
            },
        },
    }

    received_events = []

    @crewai_event_bus.on(ToolExecutionErrorEvent)
    def event_handler(source, event):
        received_events.append(event)

    available_functions = {"failing_tool": failing_tool}

    messages = [{"role": "user", "content": "Use the failing tool"}]

    llm.call(
        messages,
        tools=[tool_schema],
        available_functions=available_functions,
    )

    assert len(received_events) == 1
    event = received_events[0]
    assert isinstance(event, ToolExecutionErrorEvent)
    assert event.tool_name == "failing_tool"
    assert event.tool_args == {"param": "test"}
    assert event.tool_class == failing_tool
    assert "Tool execution failed!" in event.error