Ignore F401 in all __init__.py without putting noqa (#25823)
Summary: By adding `per-file-ignores = __init__.py: F401` to `.flake8` (supported since `flake8>=3.7`), we can ignore F401 in every `__init__.py` without adding `# noqa: F401` line by line. See http://flake8.pycqa.org/en/latest/user/options.html?highlight=per-file-ignores#cmdoption-flake8-per-file-ignores

Pull Request resolved: https://github.com/pytorch/pytorch/pull/25823

Differential Revision: D17252182

Pulled By: soumith

fbshipit-source-id: 87b174075b79e4078953a7521bd1a8f82405646b
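For reference, a minimal sketch of the option's syntax, assuming `flake8 >= 3.7`: `per-file-ignores` maps a filename glob to the comma-separated codes to suppress in matching files. The `test_*.py: F841` entry below is only an illustration of the mapping form and is not part of this change.

    [flake8]
    # per-file-ignores is available starting with flake8 3.7;
    # each entry is "<glob>: <comma-separated codes>".
    per-file-ignores =
        __init__.py: F401
        test_*.py: F841

As the summary notes, the single `__init__.py: F401` entry covers every `__init__.py` in the tree, which is what makes the per-line `# noqa: F401` comments removed below unnecessary.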
parent 76d262d4b7
commit d83389d327
.flake8
@@ -9,4 +9,5 @@ ignore =
     B007,B008,
     # these ignores are from flake8-comprehensions; please fix!
     C400,C401,C402,C403,C404,C405,C407,C411,
+per-file-ignores = __init__.py: F401
 exclude = docs/src,venv,third_party,caffe2,scripts,docs/caffe2,torch/lib/include,torch/lib/tmp_install,build,torch/include,*.pyi,.git
@@ -1,5 +1,5 @@
-from .cells import *  # noqa: F401
-from .factory import *  # noqa: F401
+from .cells import *
+from .factory import *
 
 # (output, next_state) = cell(input, state)
 seqLength = 100
@@ -1,4 +1,4 @@
-from .squeezenet import *  # noqa: F401
-from .super_resolution import *  # noqa: F401
-from .op_test import *  # noqa: F401
-from .srresnet import *  # noqa: F401
+from .squeezenet import *
+from .super_resolution import *
+from .op_test import *
+from .srresnet import *
@@ -1,2 +1,2 @@
-from .module_loader import import_module  # noqa: F401
-from .cwrap_common import set_declaration_defaults, sort_by_number_of_args  # noqa: F401
+from .module_loader import import_module
+from .cwrap_common import set_declaration_defaults, sort_by_number_of_args
@@ -15,7 +15,7 @@ import sys
 import platform
 from ._utils import _import_dotted_name
 from ._utils_internal import get_file_path, prepare_multiprocessing_environment
-from .version import __version__  # noqa: F401
+from .version import __version__
 from ._six import string_classes as _string_classes
 
 __all__ = [
@@ -41,7 +41,7 @@ import os as _dl_flags
 # if we have numpy, it *must* be imported before the call to setdlopenflags()
 # or there is risk that later c modules will segfault when importing numpy
 try:
-    import numpy as _np  # noqa: F401
+    import numpy as _np
 except ImportError:
     pass
 
@@ -301,7 +301,7 @@ del BFloat16StorageBase
 
 import torch.cuda
 import torch.autograd
-from torch.autograd import no_grad, enable_grad, set_grad_enabled  # noqa: F401
+from torch.autograd import no_grad, enable_grad, set_grad_enabled
 import torch.nn
 import torch.nn.intrinsic
 import torch.nn.quantized
@@ -337,8 +337,8 @@ def compiled_with_cxx11_abi():
 
 
 # Import the ops "namespace"
-from torch._ops import ops  # noqa: F401
-from torch._classes import classes  # noqa: F401
+from torch._ops import ops
+from torch._classes import classes
 
 # Import the quasi random sampler
 import torch.quasirandom
@@ -8,11 +8,11 @@ import torch
 import warnings
 
 from .variable import Variable
-from .function import Function, NestedIOFunction  # noqa: F401
-from .gradcheck import gradcheck, gradgradcheck  # noqa: F401
-from .grad_mode import no_grad, enable_grad, set_grad_enabled  # noqa: F401
-from .anomaly_mode import detect_anomaly, set_detect_anomaly  # noqa: F401
-from . import profiler  # noqa: F401
+from .function import Function, NestedIOFunction
+from .gradcheck import gradcheck, gradgradcheck
+from .grad_mode import no_grad, enable_grad, set_grad_enabled
+from .anomaly_mode import detect_anomaly, set_detect_anomaly
+from . import profiler
 
 __all__ = ['Variable', 'Function', 'backward', 'grad_mode']
 
@@ -1 +1 @@
-from .tensor import *  # noqa: F401
+from .tensor import *
@@ -555,7 +555,7 @@ torch._storage_classes.add(HalfStorage)
 torch._storage_classes.add(BoolStorage)
 torch._storage_classes.add(BFloat16Storage)
 
-from . import sparse  # noqa: F401
-from . import profiler  # noqa: F401
-from . import nvtx  # noqa: F401
-from .streams import Stream, Event  # noqa: F401
+from . import sparse
+from . import profiler
+from . import nvtx
+from .streams import Stream, Event
@@ -13,8 +13,9 @@ if is_available() and not (torch._C._c10d_init() and torch._C._rpc_init() and to
 
 
 if is_available():
-    from .distributed_c10d import *  # noqa: F401
+    from .distributed_c10d import *
     # Variables prefixed with underscore are not auto imported
     # See the comment in `distributed_c10d.py` above `_backend` on why we expose
     # this.
-    from .distributed_c10d import _backend  # noqa: F401
+
+    from .distributed_c10d import _backend
@@ -1 +1 @@
-from .onnx import *  # noqa: F401
+from .onnx import *
@@ -32,8 +32,8 @@ import warnings
 from collections import OrderedDict
 
 # These are imported so users can access them from the `torch.jit` module
-from torch._jit_internal import Final, _overload, _overload_method  # noqa: F401
-from torch._jit_internal import ignore, export, unused  # noqa: F401
+from torch._jit_internal import Final, _overload, _overload_method
+from torch._jit_internal import ignore, export, unused
 
 if sys.version_info[0] > 2:
     import pathlib
@@ -22,7 +22,7 @@ __all__ = ['set_sharing_strategy', 'get_sharing_strategy',
            'get_all_sharing_strategies']
 
 
-from multiprocessing import *  # noqa: F401
+from multiprocessing import *
 
 
 __all__ += multiprocessing.__all__
@@ -36,13 +36,13 @@ torch._C._multiprocessing_init()
 if sys.version_info < (3, 3):
     """Override basic classes in Python 2.7 and Python 3.3 to use ForkingPickler
     for serialization. Later versions of Python already use ForkingPickler."""
-    from .queue import Queue, SimpleQueue  # noqa: F401
-    from .pool import Pool  # noqa: F401
+    from .queue import Queue, SimpleQueue
+    from .pool import Pool
 
 
 """Add helper function to spawn N processes and wait for completion of any of
 them. This depends `mp.get_context` which was added in Python 3.4."""
-from .spawn import spawn, SpawnContext, _supports_context  # noqa: F401
+from .spawn import spawn, SpawnContext, _supports_context
 
 
 if sys.platform == 'darwin' or sys.platform == 'win32':
@@ -1,5 +1,5 @@
-from .modules import *  # noqa: F401
-from .parameter import Parameter  # noqa: F401
-from .parallel import DataParallel  # noqa: F401
-from . import init  # noqa: F401
-from . import utils  # noqa: F401
+from .modules import *
+from .parameter import Parameter
+from .parallel import DataParallel
+from . import init
+from . import utils
@@ -1,2 +1,2 @@
 from __future__ import absolute_import, division, print_function, unicode_literals
-from .modules import *  # noqa: F401
+from .modules import *
@@ -3,4 +3,4 @@ from __future__ import division
 from __future__ import print_function
 from __future__ import unicode_literals
 
-from .modules import *  # noqa: F401
+from .modules import *
@@ -1,4 +1,4 @@
 
 from __future__ import absolute_import, division, print_function, unicode_literals
 
-from .modules import *  # noqa: F401
+from .modules import *
@@ -1,6 +1,6 @@
-from . import rnn  # noqa: F401
-from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_  # noqa: F401
-from .weight_norm import weight_norm, remove_weight_norm  # noqa: F401
-from .convert_parameters import parameters_to_vector, vector_to_parameters  # noqa: F401
-from .spectral_norm import spectral_norm, remove_spectral_norm  # noqa: F401
-from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights  # noqa: F401
+from . import rnn
+from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
+from .weight_norm import weight_norm, remove_weight_norm
+from .convert_parameters import parameters_to_vector, vector_to_parameters
+from .spectral_norm import spectral_norm, remove_spectral_norm
+from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
@@ -5,19 +5,19 @@ enough, so that more sophisticated ones can be also easily integrated in the
 future.
 """
 
-from .adadelta import Adadelta  # noqa: F401
-from .adagrad import Adagrad  # noqa: F401
-from .adam import Adam  # noqa: F401
-from .adamw import AdamW  # noqa: F401
-from .sparse_adam import SparseAdam  # noqa: F401
-from .adamax import Adamax  # noqa: F401
-from .asgd import ASGD  # noqa: F401
-from .sgd import SGD  # noqa: F401
-from .rprop import Rprop  # noqa: F401
-from .rmsprop import RMSprop  # noqa: F401
-from .optimizer import Optimizer  # noqa: F401
-from .lbfgs import LBFGS  # noqa: F401
-from . import lr_scheduler  # noqa: F401
+from .adadelta import Adadelta
+from .adagrad import Adagrad
+from .adam import Adam
+from .adamw import AdamW
+from .sparse_adam import SparseAdam
+from .adamax import Adamax
+from .asgd import ASGD
+from .sgd import SGD
+from .rprop import Rprop
+from .rmsprop import RMSprop
+from .optimizer import Optimizer
+from .lbfgs import LBFGS
+from . import lr_scheduler
 
 del adadelta
 del adagrad
@@ -1,10 +1,10 @@
 from __future__ import absolute_import, division, print_function, unicode_literals
-from .quantize import *  # noqa: F401
-from .observer import *  # noqa: F401
-from .qconfig import *  # noqa: F401
-from .fake_quantize import *  # noqa: F401
-from .fuse_modules import fuse_modules  # noqa: F401
-from .stubs import *  # noqa: F401
+from .quantize import *
+from .observer import *
+from .qconfig import *
+from .fake_quantize import *
+from .fuse_modules import fuse_modules
+from .stubs import *
 
 def default_eval_fn(model, calib_data):
     r"""
@@ -1,6 +1,6 @@
 from __future__ import absolute_import, division, print_function, unicode_literals
 
-from .throughput_benchmark import ThroughputBenchmark  # noqa: F401
+from .throughput_benchmark import ThroughputBenchmark
 
 # Set the module for a given object for nicer printing
 def set_module(obj, mod):
@@ -1,4 +1,4 @@
-from .sampler import Sampler, SequentialSampler, RandomSampler, SubsetRandomSampler, WeightedRandomSampler, BatchSampler  # noqa: F401
-from .distributed import DistributedSampler  # noqa: F401
-from .dataset import Dataset, IterableDataset, TensorDataset, ConcatDataset, ChainDataset, Subset, random_split  # noqa: F401
-from .dataloader import DataLoader, _DatasetKind, get_worker_info  # noqa: F401
+from .sampler import Sampler, SequentialSampler, RandomSampler, SubsetRandomSampler, WeightedRandomSampler, BatchSampler
+from .distributed import DistributedSampler
+from .dataset import Dataset, IterableDataset, TensorDataset, ConcatDataset, ChainDataset, Subset, random_split
+from .dataloader import DataLoader, _DatasetKind, get_worker_info
@@ -11,7 +11,7 @@ import sys
 import atexit
 
 # old private location of the ExceptionWrapper that some users rely on:
-from torch._utils import ExceptionWrapper  # noqa: F401
+from torch._utils import ExceptionWrapper
 
 
 IS_WINDOWS = sys.platform == "win32"
@@ -42,4 +42,4 @@ def _set_python_exit_flag():
 atexit.register(_set_python_exit_flag)
 
 
-from . import worker, signal_handling, pin_memory, collate, fetch  # noqa: F401
+from . import worker, signal_handling, pin_memory, collate, fetch