Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 00:28:31 +00:00
Fix tests that were checking usage metrics
@@ -10,13 +10,12 @@ from crewai.pipeline.pipeline_run_result import PipelineRunResult
 from crewai.process import Process
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
+from crewai.types.usage_metrics import UsageMetrics
 from pydantic import BaseModel, ValidationError
 
-DEFAULT_TOKEN_USAGE = {
-    "total_tokens": 100,
-    "prompt_tokens": 50,
-    "completion_tokens": 50,
-}
+DEFAULT_TOKEN_USAGE = UsageMetrics(
+    total_tokens=100, prompt_tokens=50, completion_tokens=50, successful_requests=3
+)
 
 
 @pytest.fixture
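For reference, a minimal sketch of the UsageMetrics shape implied by the constructor call in the hunk above. The four field names come straight from the diff; the defaults, and any extra fields the real crewai.types.usage_metrics.UsageMetrics may define, are assumptions here.

# Sketch only: field names inferred from the UsageMetrics(...) call above;
# defaults and any additional fields on the real model are assumptions.
from pydantic import BaseModel


class UsageMetrics(BaseModel):
    total_tokens: int = 0         # prompt + completion tokens combined
    prompt_tokens: int = 0        # tokens sent to the LLM
    completion_tokens: int = 0    # tokens returned by the LLM
    successful_requests: int = 0  # LLM calls that completed without error


# The new constant from the hunk above, now a typed, validated model
# rather than a bare dict:
DEFAULT_TOKEN_USAGE = UsageMetrics(
    total_tokens=100, prompt_tokens=50, completion_tokens=50, successful_requests=3
)
assert DEFAULT_TOKEN_USAGE.total_tokens == 100  # attribute access now works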
@@ -443,6 +442,7 @@ Options:
 - Should the final output include the accumulation of previous stages' outputs?
 """
 
 
 @pytest.mark.asyncio
 async def test_pipeline_data_accumulation(mock_crew_factory):
+    crew1 = mock_crew_factory(name="Crew 1", output_json_dict={"key1": "value1"})
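Why the plain dict had to go: a Pydantic model never compares equal to a dict, so any test that compared real usage metrics against the old DEFAULT_TOKEN_USAGE dict could not pass once the metrics became a model. A hedged illustration follows, reusing the sketched UsageMetrics from above; nothing beyond the field names is taken from this diff.

from pydantic import BaseModel


class UsageMetrics(BaseModel):  # same sketch as above, repeated to stay self-contained
    total_tokens: int = 0
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0


metrics = UsageMetrics(
    total_tokens=100, prompt_tokens=50, completion_tokens=50, successful_requests=3
)

# A Pydantic model compares unequal to a plain dict even when the keys and
# values line up, so assertions against the old dict constant broke:
assert metrics != {"total_tokens": 100, "prompt_tokens": 50, "completion_tokens": 50}

# Model-to-model comparison against the new DEFAULT_TOKEN_USAGE passes:
assert metrics == UsageMetrics(
    total_tokens=100, prompt_tokens=50, completion_tokens=50, successful_requests=3
)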