From 053367b1ed7419964b09c749f4ee98ae36a1a57f Mon Sep 17 00:00:00 2001
From: Fabrice Pont
Date: Wed, 4 Oct 2023 23:52:52 +0000
Subject: [PATCH] fix: flake8-bugbear code B024 (#107265)

See #106571, item B024.

This fix adds `abstractmethod` to methods declared inside abstract
classes, or marks the class with `# noqa: B024` where the empty
abstract base class is intentional.

Should I also include PEP8-compliant reformatting of the files I had
to modify?

Pull Request resolved: https://github.com/pytorch/pytorch/pull/107265
Approved by: https://github.com/kit1980
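For reviewers unfamiliar with this check, here is a minimal sketch of what
B024 flags and of the two remedies applied in this patch (the class names
below are invented for illustration):

```python
from abc import ABC, abstractmethod


class BrokenHandler(ABC):
    # B024 fires here: the class inherits from ABC but declares no
    # abstract method, so it can be instantiated like a plain class.
    def run(self) -> None:
        raise NotImplementedError


class FixedHandler(ABC):
    # Remedy 1: mark the stub as @abstractmethod, which makes the
    # intent explicit and prevents instantiation of the base class.
    @abstractmethod
    def run(self) -> None:
        raise NotImplementedError


class MarkerBase(ABC):  # noqa: B024
    # Remedy 2: when the class is intentionally an empty base class,
    # suppress the check explicitly, as done for QuantizeHandler,
    # QuantizationSpecBase, PlacementSpec, and AnalysisResult below.
    """Intentionally empty base class."""
```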
---
 .flake8                                       |  2 +-
 pyproject.toml                                |  2 +-
 torch/ao/quantization/fx/quantize_handler.py  | 24 +++++++------------
 torch/ao/quantization/quantizer/quantizer.py  |  2 +-
 torch/distributed/_shard/sharding_spec/api.py |  2 +-
 .../_optimizer_overlap/optimizer_overlap.py   | 10 ++++++--
 torch/onnx/_internal/fx/_pass.py              |  2 +-
 torchgen/api/types/types_base.py              |  5 +++-
 8 files changed, 26 insertions(+), 23 deletions(-)

diff --git a/.flake8 b/.flake8
index ad26691425c..dfc371b386a 100644
--- a/.flake8
+++ b/.flake8
@@ -14,7 +14,7 @@ ignore =
     # to line this up with executable bit
     EXE001,
     # these ignores are from flake8-bugbear; please fix!
-    B007,B008,B017,B019,B020,B023,B024,B026,B028,B903,B904,B905,B906,B907
+    B007,B008,B017,B019,B020,B023,B026,B028,B903,B904,B905,B906,B907
     # these ignores are from flake8-comprehensions; please fix!
     C407,
     # these ignores are from flake8-logging-format; please fix!
diff --git a/pyproject.toml b/pyproject.toml
index 38f55c38ece..73501f5f6e8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,7 +29,7 @@ ignore = [
     "B007", "B008", "B017",
     "B018", # Useless expression
     "B019", "B020",
-    "B023", "B024", "B026",
+    "B023", "B026",
     "B028", # No explicit `stacklevel` keyword argument found
     "B904",
     "E402",
diff --git a/torch/ao/quantization/fx/quantize_handler.py b/torch/ao/quantization/fx/quantize_handler.py
index e98bc334f00..e70040f7e64 100644
--- a/torch/ao/quantization/fx/quantize_handler.py
+++ b/torch/ao/quantization/fx/quantize_handler.py
@@ -1,24 +1,18 @@
-import torch
-from torch.fx.graph import (
-    Node,
-)
+from abc import ABC
+from typing import Callable, Dict, List, Optional, Type
+
+import torch

-from .utils import (
-    all_node_args_have_no_tensors,
-)
 from torch.ao.quantization.backend_config import (
     BackendConfig,
     DTypeConfig,
     ObservationType,
 )
-from torch.ao.quantization.utils import (
-    NodePattern,
-    Pattern,
-    QuantizerCls,
-)
+from torch.ao.quantization.utils import NodePattern, Pattern, QuantizerCls
+from torch.fx.graph import Node
+
+from .utils import all_node_args_have_no_tensors

-from abc import ABC
-from typing import Callable, Dict, List, Type, Optional

 __all__ = [
     "QuantizeHandler",
@@ -45,7 +39,7 @@ def _default_root_node_getter(node_pattern):
     return node_pattern

 # Base Pattern Handler
-class QuantizeHandler(ABC):
+class QuantizeHandler(ABC):  # noqa: B024
     """ Base handler class for the quantizer patterns
     """
     def __init__(
diff --git a/torch/ao/quantization/quantizer/quantizer.py b/torch/ao/quantization/quantizer/quantizer.py
index e5f2475b439..607e1b47a3b 100644
--- a/torch/ao/quantization/quantizer/quantizer.py
+++ b/torch/ao/quantization/quantizer/quantizer.py
@@ -37,7 +37,7 @@ SUPPORTED_QSCHEMES = [
 ]


-class QuantizationSpecBase(ABC):
+class QuantizationSpecBase(ABC):  # noqa: B024
     """Base class for different types of quantization specs that allows users to
     specify how to quantize a Tensor (input/output of a Node) in the model
     """
diff --git a/torch/distributed/_shard/sharding_spec/api.py b/torch/distributed/_shard/sharding_spec/api.py
index 121d9748dde..d389bff5ceb 100644
--- a/torch/distributed/_shard/sharding_spec/api.py
+++ b/torch/distributed/_shard/sharding_spec/api.py
@@ -21,7 +21,7 @@ if TYPE_CHECKING:
     # from run-time to resolve circular dependency.
     from torch.distributed._shard.sharded_tensor import ShardedTensor

-class PlacementSpec(ABC):
+class PlacementSpec(ABC):  # noqa: B024
     """
     Base class representing the placement of an entity. Subclasses of this
     class can be used to specify customized placements which might not be
diff --git a/torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py b/torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py
index 4ca9289ea3a..b018799e296 100644
--- a/torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py
+++ b/torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py
@@ -1,4 +1,4 @@
-from abc import ABC
+from abc import ABC, abstractmethod
 import inspect
 from typing import Dict, Type

@@ -39,12 +39,14 @@ class OverlappedOptimizer(ABC):
         """
         self.optim_cls = optim_cls

+    @abstractmethod
     def register_ddp(self, ddp: DistributedDataParallel) -> None:
         """Registers the overlapped optimizer with DDP."""
         raise NotImplementedError(
             f"{self.__class__.__name__} does not support overlapped DDP."
         )

+    @abstractmethod
     def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:
         """Registers the overlapped optimizer with FSDP."""
         raise NotImplementedError(
@@ -70,7 +72,11 @@ class _OverlappedStandardOptimizer(OverlappedOptimizer):
         )

     # TODO: register_fsdp once FSDP supports communication hook.
-
+    def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:
+        """Registers the overlapped optimizer with FSDP."""
+        raise NotImplementedError(
+            f"{self.__class__.__name__} does not support overlapped FSDP."
+        )

 def _as_overlapped_optim(optim_cls: Type, params, *args, **kwargs):
     """
diff --git a/torch/onnx/_internal/fx/_pass.py b/torch/onnx/_internal/fx/_pass.py
index a02ffb62f88..e32e30ebb20 100644
--- a/torch/onnx/_internal/fx/_pass.py
+++ b/torch/onnx/_internal/fx/_pass.py
@@ -296,7 +296,7 @@ class Transform(abc.ABC):
         return module


-class AnalysisResult(abc.ABC):
+class AnalysisResult(abc.ABC):  # noqa: B024
     ...


diff --git a/torchgen/api/types/types_base.py b/torchgen/api/types/types_base.py
index 119b21eca66..2f8561e49ab 100644
--- a/torchgen/api/types/types_base.py
+++ b/torchgen/api/types/types_base.py
@@ -12,7 +12,7 @@ if we want to generate code for another C++ library.
 Add new types to `types.py` if these types are ATen/c10 related.
 Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
 """
-from abc import ABC
+from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from enum import auto, Enum
 from typing import List, Optional, Union
@@ -61,12 +61,15 @@ voidT = BaseCppType("", "void")


 class CType(ABC):
+    @abstractmethod
     def cpp_type(self, *, strip_ref: bool = False) -> str:
         raise NotImplementedError

+    @abstractmethod
     def cpp_type_registration_declarations(self) -> str:
         raise NotImplementedError

+    @abstractmethod
     def remove_const_ref(self) -> "CType":
         return self