Fix tests that were checking usage metrics

Brandon Hancock
2024-07-29 15:48:48 -04:00
parent e3182d135a
commit 619806f80d
2 changed files with 16 additions and 19 deletions


@@ -10,13 +10,12 @@ from crewai.pipeline.pipeline_run_result import PipelineRunResult
 from crewai.process import Process
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
+from crewai.types.usage_metrics import UsageMetrics
 from pydantic import BaseModel, ValidationError
 
-DEFAULT_TOKEN_USAGE = {
-    "total_tokens": 100,
-    "prompt_tokens": 50,
-    "completion_tokens": 50,
-}
+DEFAULT_TOKEN_USAGE = UsageMetrics(
+    total_tokens=100, prompt_tokens=50, completion_tokens=50, successful_requests=3
+)
 
 @pytest.fixture
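
For context, a minimal sketch of what the fixture change amounts to, assuming UsageMetrics is a Pydantic BaseModel with the four integer fields named in the diff (the stand-in class below is illustrative, not the library's actual definition):

    from pydantic import BaseModel

    # Stand-in for crewai.types.usage_metrics.UsageMetrics (assumption:
    # a plain BaseModel with the four fields the diff passes to it).
    class UsageMetrics(BaseModel):
        total_tokens: int = 0
        prompt_tokens: int = 0
        completion_tokens: int = 0
        successful_requests: int = 0

    # The fixture value from the diff, now a typed object rather than a dict.
    DEFAULT_TOKEN_USAGE = UsageMetrics(
        total_tokens=100, prompt_tokens=50, completion_tokens=50, successful_requests=3
    )

    # Assertions now compare model instances (or their dumps) instead of
    # comparing a model returned by the library against a raw dict.
    assert DEFAULT_TOKEN_USAGE.model_dump()["total_tokens"] == 100

Switching the fixture to the typed model means the tests compare like with like, which is presumably what the failing usage-metrics assertions tripped over.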
@@ -443,6 +442,7 @@ Options:
 - Should the final output include the accumulation of previous stages' outputs?
 """
 
+@pytest.mark.asyncio
 async def test_pipeline_data_accumulation(mock_crew_factory):
     crew1 = mock_crew_factory(name="Crew 1", output_json_dict={"key1": "value1"})
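
The second hunk adds the pytest-asyncio marker so the coroutine test is actually awaited. A short illustration of why the marker matters, assuming pytest-asyncio is installed and not configured with asyncio_mode = "auto" (the helper below is hypothetical, not from the repo):

    import asyncio

    import pytest

    async def run_pipeline():
        # Hypothetical stand-in for an async pipeline kickoff.
        await asyncio.sleep(0)
        return {"key1": "value1"}

    @pytest.mark.asyncio
    async def test_pipeline_runs():
        # Without the marker (or asyncio_mode = "auto"), pytest collects the
        # coroutine function but never awaits it, so the body never executes.
        result = await run_pipeline()
        assert result == {"key1": "value1"}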