style: fix linter issues

Lucas Gomide
2025-07-11 00:09:49 -03:00
parent 3fee619798
commit de425450d4
5 changed files with 5 additions and 12 deletions


@@ -58,5 +58,5 @@ __all__ = [
     "create_default_evaluator",
     "ExperimentRunner",
     "ExperimentResults",
-    "TestCaseResult"
+    "ExperimentResult"
 ]
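
This first hunk swaps a stale name out of __all__. For context, a name listed in __all__ that the module never defines is exactly what a linter flags here; below is a minimal generic sketch, assuming a pyflakes/ruff-style checker (rule F822). The code is illustrative only, not crewai code:

# Illustrative example: every name listed in __all__ must exist in the
# module, otherwise pyflakes/ruff reports F822 and `from package import *`
# raises AttributeError at import time.
class ExperimentResult:
    ...

__all__ = [
    "ExperimentResult",    # OK: matches a name defined above
    # "TestCaseResult",    # would be flagged: not defined in this module
]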


@@ -1,7 +1,7 @@
 import json
 import os
 from datetime import datetime, timezone
-from typing import Any, Dict, Optional
+from typing import Any
 from pydantic import BaseModel
 
 class ExperimentResult(BaseModel):


@@ -2,7 +2,7 @@ from typing import Dict, Any
 from rich.console import Console
 from rich.table import Table
 from rich.panel import Panel
-from crewai.evaluation.experiment.result import ExperimentResult, ExperimentResults
+from crewai.evaluation.experiment.result import ExperimentResults
 
 class ExperimentResultsDisplay:
     def __init__(self):

@@ -1,7 +1,6 @@
 from collections import defaultdict
 from hashlib import md5
-from typing import List, Dict, Union, Optional, Any
-from rich.console import Console
+from typing import List, Dict, Union, Any
 
 from crewai import Crew
 from crewai.evaluation import AgentEvaluator, create_default_evaluator


@@ -1,15 +1,9 @@
 import pytest
 from unittest.mock import MagicMock, patch
-import json
-from crewai import llm
 from crewai.agent import Agent
-from crewai.evaluation import metrics
-from crewai.task import Task
-from crewai.crew import Crew
 from crewai.evaluation.experiment.runner import ExperimentRunner
-from crewai.evaluation.experiment.result import ExperimentResult, ExperimentResults
-from crewai.evaluation.agent_evaluator import AgentEvaluator
+from crewai.evaluation.experiment.result import ExperimentResults
 from crewai.evaluation.evaluation_display import AgentAggregatedEvaluationResult
 from crewai.evaluation.base_evaluator import MetricCategory, EvaluationScore
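
Apart from the __all__ fix, the hunks above all follow the same pattern: imports that are no longer referenced are dropped. A minimal sketch of the finding these fixes address, assuming a pyflakes/ruff-style checker (rule F401, unused import, auto-fixable with `ruff check --fix`); the function below is illustrative, not crewai code:

# Illustrative example: Dict and Optional are imported but never used, so a
# linter reports F401 for them and an autofix trims them from the import.
from typing import Any, Dict, Optional


def to_payload(value: Any) -> dict:
    # Only Any is referenced here; Dict and Optional are dead imports.
    return {"value": value}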