Enable PLC0414 on ruff (#165828)
This PR enables `PLC0414`, which flags redundant import aliases such as `from m import x as x` and fixes them by dropping the alias.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165828
Approved by: https://github.com/albanD
parent 34ed7a8f0d
commit f9953e0f61
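For orientation, here is a minimal sketch of what the rule does; the imports below mirror lines from the hunks that follow and are illustrative, not part of the commit:

```python
# PLC0414 (useless-import-alias) fires when the alias repeats the imported name:
from itertools import product as product  # flagged: the alias renames nothing

# ruff's auto-fix simply drops the alias:
from itertools import product

# Aliases that actually rename something are left alone:
import hypothesis.strategies as st
from torch import device as _device
```

One caveat that shows up in several hunks below: `from m import x as x` also doubles as a re-export marker, so some of the fixed lines keep a `# noqa: F401` to stop the now-"unused" import from being pruned.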
```diff
@@ -208,6 +208,7 @@ select = [
     "PLC1802", # len({expression}) used as condition without comparison
     "PLC0205", # string as __slots__
     "PLC3002", # unnecessary-direct-lambda-call
+    "PLC0414", # Import alias does not rename original package
     "PLE",
     "PLR0133", # constant comparison
     "PLR0206", # property with params
```
```diff
@@ -4,7 +4,7 @@
 import io
 import os
 import sys
-from itertools import product as product
+from itertools import product
 from typing import Union
 
 import hypothesis.strategies as st
```
```diff
@@ -426,7 +426,7 @@ class TestExecutionTrace(TestCase):
     @skipCPUIf(True, "skip CPU device for testing profiling triton")
     def test_execution_trace_env_enabled_with_pt2(self, device):
         # clean up the local cache for triton kernel
-        from torch._inductor.codecache import PyCodeCache as PyCodeCache
+        from torch._inductor.codecache import PyCodeCache
 
         PyCodeCache.cache_clear(purge=True)
 
```
```diff
@@ -488,7 +488,7 @@ class TestExecutionTrace(TestCase):
     @skipCPUIf(True, "skip CPU device for testing profiling triton")
     def test_triton_fx_graph_with_et(self, device):
         # clean up the local cache for triton kernel
-        from torch._inductor.codecache import PyCodeCache as PyCodeCache
+        from torch._inductor.codecache import PyCodeCache
 
         PyCodeCache.cache_clear(purge=True)
 
```
```diff
@@ -18,7 +18,7 @@ from torch.testing._internal.common_quantization import (
     LinearAddModel,
 )
 from torch.testing._internal.common_utils import TestCase
-from torch.utils import bundled_inputs as bundled_inputs
+from torch.utils import bundled_inputs
 
 
 class myMod(torch.nn.Module):
```
```diff
@@ -14,7 +14,6 @@ REPO_ROOT = Path(__file__).resolve().parents[3]
 sys.path.append(str(REPO_ROOT))
 
 from tools.test.heuristics.test_interface import TestTD
-from tools.testing.target_determination.determinator import TestPrioritizations
 from tools.testing.target_determination.heuristics.filepath import (
     file_matches_keyword,
     get_keywords,
@@ -22,6 +21,7 @@ from tools.testing.target_determination.heuristics.filepath import (
 from tools.testing.target_determination.heuristics.historical_class_failure_correlation import (
     HistoricalClassFailurCorrelation,
 )
+from tools.testing.target_determination.heuristics.interface import TestPrioritizations
 from tools.testing.target_determination.heuristics.previously_failed_in_pr import (
     get_previous_failures,
 )
```
```diff
@@ -19,9 +19,9 @@ from tools.stats.import_test_stats import (
 )
 from tools.stats.upload_metrics import emit_metric
 from tools.testing.discover_tests import TESTS
-from tools.testing.target_determination.determinator import (
+from tools.testing.target_determination.determinator import get_test_prioritizations
+from tools.testing.target_determination.heuristics.interface import (
     AggregatedHeuristics,
-    get_test_prioritizations,
     TestPrioritizations,
 )
 
```
```diff
@@ -4,9 +4,9 @@ import sys
 from typing import Any
 
 from tools.testing.target_determination.heuristics import (
-    AggregatedHeuristics as AggregatedHeuristics,
+    AggregatedHeuristics,
     HEURISTICS,
-    TestPrioritizations as TestPrioritizations,
+    TestPrioritizations,
 )
 
 
```
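The last two hunks (together with the two before them) show the knock-on effect of the fix: once `determinator.py` stops writing `AggregatedHeuristics as AggregatedHeuristics` and `TestPrioritizations as TestPrioritizations`, it no longer re-exports those names, so callers import them from the defining module instead. A sketch of the before/after pattern, assembled from lines in the hunks above for illustration:

```python
# Before: the consumer reached TestPrioritizations through determinator,
# which merely re-imported it with a redundant alias.
from tools.testing.target_determination.determinator import TestPrioritizations

# After: the alias (and with it the implicit re-export) is gone, so the
# consumer imports from the module that actually defines the class.
from tools.testing.target_determination.heuristics.interface import TestPrioritizations
```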
```diff
@@ -114,7 +114,7 @@ from .fx_passes.post_grad import post_grad_passes, view_to_reshape
 from .fx_passes.pre_grad import pre_grad_passes
 from .graph import GraphLowering
 from .ir import get_device_type, IRNode
-from .output_code import complex_memory_overlap as complex_memory_overlap  # noqa: F401
+from .output_code import complex_memory_overlap  # noqa: F401
 from .triton_bundler import TritonBundler
 from .utils import (
     align_inputs_from_check_idxs,
```
```diff
@@ -9,7 +9,7 @@ import torch._inductor.async_compile  # noqa: F401 required to warm up AsyncComp
 from torch._inductor.output_code import CompiledFxGraphConstants, OutputCode
 
 from .compile_fx import _CompileFxKwargs, _InProcessFxCompile, FxCompile
-from .output_code import complex_memory_overlap as complex_memory_overlap  # noqa: F401
+from .output_code import complex_memory_overlap  # noqa: F401
 
 
 # When async compile works with cache, remove the disabling below
```
```diff
@@ -30,7 +30,7 @@ from . import config
 from .compile_fx import _CompileFxKwargs, _InProcessFxCompile, FxCompile, log
 from .debug import DebugContext
 from .graph import GraphLowering
-from .output_code import complex_memory_overlap as complex_memory_overlap  # noqa: F401
+from .output_code import complex_memory_overlap  # noqa: F401
 from .virtualized import V
 
 
```
```diff
@@ -20,7 +20,7 @@ from .compile_fx_ext import (
     _WireProtocolPickledInput,
     _WireProtocolPickledOutput,
 )
-from .output_code import complex_memory_overlap as complex_memory_overlap  # noqa: F401
+from .output_code import complex_memory_overlap  # noqa: F401
 
 
 if TYPE_CHECKING:
```
```diff
@@ -102,7 +102,7 @@ S = TypeVar("S", bound="StorageWeakRefWrapper")
 if torch.backends.cuda.is_built():
     from torch._C import (
         _cuda_CUDAAllocator_AllocatorState as AllocatorState,
-        _set_cached_tensors_enabled as _set_cached_tensors_enabled,
+        _set_cached_tensors_enabled,
     )
 else:
 
```
```diff
@@ -19,7 +19,7 @@ from unittest.mock import patch
 
 import torch
 from functorch.compile import draw_graph, get_aot_graph_name, get_graph_being_compiled
-from torch import fx as fx
+from torch import fx
 from torch._dynamo.repro.after_aot import save_graph_repro
 from torch._dynamo.utils import get_debug_dir
 from torch._inductor import utils
```
```diff
@@ -2674,7 +2674,7 @@ class FakeTensorMode(TorchDispatchMode):
             return maybe_propagate_real_tensors(fast_impl(self, *args, **kwargs))
 
         # If there's a Python meta, prefer that over the decomposition
-        from torch._decomp import meta_table as meta_table
+        from torch._decomp import meta_table
 
         if (
             func not in meta_table
```
```diff
@@ -1038,7 +1038,7 @@ class ExecutionTraceObserver(_ITraceObserver):
             return
 
         # Save the kernel paths for the generated kernels
-        from torch._inductor.codecache import PyCodeCache as PyCodeCache
+        from torch._inductor.codecache import PyCodeCache
 
         kernel_files = [
             v.__file__
```
```diff
@@ -18,15 +18,15 @@ from typing_extensions import Self
 # `as` imports have better static analysis support than assignment `ExposedType: TypeAlias = HiddenType`
 from torch import (  # noqa: F401
     device as _device,
-    DispatchKey as DispatchKey,
+    DispatchKey,
     dtype as _dtype,
     layout as _layout,
     qscheme as _qscheme,
-    Size as Size,
-    SymBool as SymBool,
-    SymFloat as SymFloat,
-    SymInt as SymInt,
-    Tensor as Tensor,
+    Size,
+    SymBool,
+    SymFloat,
+    SymInt,
+    Tensor,
 )
 
 
```
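The context comment in this hunk touches the one real subtlety of PLC0414: by convention (codified for stubs in PEP 484, and applied by type checkers such as mypy when implicit re-exports are disabled), a redundant alias `from m import X as X` marks an intentional re-export. When the auto-fix drops the alias, that intent needs another carrier; in this file it is the `# noqa: F401` already on the `from torch import (` line. A hedged sketch of the equivalent idioms:

```python
# The re-export idiom that PLC0414's fix removes:
from torch import Tensor as Tensor  # alias == name: "Tensor is public API here"

# Signals that survive the fix:
from torch import Tensor  # noqa: F401  (tells linters the "unused" import is deliberate)

__all__ = ["Tensor"]  # or declare the public surface explicitly
```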
```diff
@@ -21,13 +21,13 @@ from typing_extensions import deprecated, TypeIs
 import torch.utils._pytree as python_pytree
 from torch.torch_version import TorchVersion as _TorchVersion
 from torch.utils._pytree import (
-    is_namedtuple as is_namedtuple,
-    is_namedtuple_class as is_namedtuple_class,
-    is_namedtuple_instance as is_namedtuple_instance,
-    is_structseq as is_structseq,
-    is_structseq_class as is_structseq_class,
-    is_structseq_instance as is_structseq_instance,
-    KeyEntry as KeyEntry,
+    is_namedtuple,
+    is_namedtuple_class,
+    is_namedtuple_instance,
+    is_structseq,
+    is_structseq_class,
+    is_structseq_instance,
+    KeyEntry,
 )
 
 
```
```diff
@@ -8,7 +8,7 @@ from typing_extensions import deprecated
 
 try:
     from torchgen.api.python import format_function_signature
-    from torchgen.utils import FileManager as FileManager
+    from torchgen.utils import FileManager
 except ImportError:
     import sys
 
```