[BE][Easy][18/19] enforce style for empty lines in import segments in torch/d*/ (#129770)
See https://github.com/pytorch/pytorch/pull/129751#issue-2380881501. Most changes are auto-generated by the linter. You can review these PRs via:

```bash
git diff --ignore-all-space --ignore-blank-lines HEAD~1
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129770
Approved by: https://github.com/wconstab
This commit is contained in:
parent bc7ed1fbdc
commit b25ef91bf1
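The style being enforced in this series is the ufmt/isort empty-line convention visible in every hunk below: no stray blank lines inside an import segment, and exactly two blank lines between the last import and the first top-level statement. A minimal before/after sketch (file contents are illustrative, not taken from the PR):

```python
# Before: a blank line splits the import segment, and only one blank
# line separates the imports from the first top-level statement.
import functools

import logging
from typing import Any

__all__ = ["Example"]
```

```python
# After: the import segment is contiguous and two blank lines precede
# the first top-level statement.
import functools
import logging
from typing import Any


__all__ = ["Example"]
```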
```diff
@@ -54,7 +54,6 @@ ISORT_SKIPLIST = re.compile(
     # torch/[a-c]*/**
     "torch/[a-c]*/**",
     # torch/d*/**
-    "torch/d*/**",
     # torch/[e-n]*/**
     "torch/[e-n]*/**",
     # torch/[o-z]*/**
```
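The hunk above removes `"torch/d*/**"` from `ISORT_SKIPLIST`, which is what switches enforcement on for `torch/d*/`. As a rough sketch of how a glob skiplist like this can gate a per-file check (the helper name and glob translation are assumptions, not the linter's actual code):

```python
import fnmatch
import re

# Globs still excluded from import sorting (illustrative subset).
SKIP_GLOBS = [
    "torch/[a-c]*/**",
    # "torch/d*/**",  # removed by this PR: torch/d*/ is now linted
    "torch/[e-n]*/**",
]
SKIPLIST = re.compile("|".join(fnmatch.translate(g) for g in SKIP_GLOBS))


def should_sort_imports(path: str) -> bool:
    # Paths matching the skiplist keep their existing import layout.
    return SKIPLIST.fullmatch(path) is None


print(should_sort_imports("torch/distributed/run.py"))  # True: now enforced
print(should_sort_imports("torch/ao/quantization/qconfig.py"))  # False: still skipped
```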
```diff
@@ -2,7 +2,6 @@
 # mypy: allow-untyped-defs
 import functools
 import logging
-
 from typing import (
     Any,
     Callable,
```
```diff
@@ -23,6 +23,7 @@ import torch.distributed as dist
 import torch.nn.functional as F
 from torch.distributed._functional_collectives import AsyncCollectiveTensor
 
+
 if dist.is_available() or TYPE_CHECKING:
     from torch.distributed import distributed_c10d
     from torch.distributed._shard.sharded_tensor import ShardedTensor
```
```diff
@@ -1,7 +1,6 @@
 # mypy: allow-untyped-decorators
 import socket
 import uuid
-
 from contextlib import contextmanager
 from datetime import timedelta
 from functools import partial
@@ -12,6 +11,7 @@ import torch.distributed._functional_collectives as funcol
 import torch.distributed.distributed_c10d as c10d
 from torch._C._distributed_c10d import _SymmetricMemory, Work as _Work
 
+
 _group_name_to_store: Dict[str, c10d.Store] = {}
 
 
```
```diff
@@ -20,6 +20,7 @@ from torch.distributed.checkpoint.planner import (
     WriteItemType,
 )
 
+
 aten = (
     torch.ops.aten
 )  # pyre-ignore[5]: Globally accessible variable `aten` has no type specified.
```
```diff
@@ -3,18 +3,15 @@ import copy
 import json
 import re
 import weakref
-
 from collections import defaultdict
 from typing import Any, Dict
 
 import torch
-
 import torch.nn
-
+from torch._guards import detect_fake_mode
 from torch.autograd.graph import register_multi_grad_hook
 from torch.distributed._tensor.api import DTensor
 from torch.distributed._tools.mod_tracker import ModTracker
-
 from torch.nn.modules.module import (
     register_module_forward_hook,
     register_module_forward_pre_hook,
@@ -23,10 +20,9 @@ from torch.nn.modules.module import (
 from torch.utils._python_dispatch import TorchDispatchMode
 from torch.utils._pytree import tree_flatten
 
 
 funcol_native = torch.ops._c10d_functional
 funcol_py = torch.ops.c10d_functional
-from torch._guards import detect_fake_mode
 funcol_autograd = torch.ops._c10d_functional_autograd
 c10d_ops = torch.ops.c10d
 
```
```diff
@@ -1,14 +1,11 @@
 import os
-
 from typing import Callable, Dict, Union
 
 import torch
 import torch.nn as nn
-
 from torch.distributed._tensor import DeviceMesh
 from torch.distributed._tensor.debug import CommDebugMode
 from torch.distributed._tensor.examples.comm_mode_features_example_argparser import args
-
 from torch.distributed.tensor.parallel import (
     ColwiseParallel,
     parallelize_module,
@@ -21,7 +18,6 @@ from torch.testing._internal.distributed._tensor.common_dtensor import (
     NUM_DEVICES,
     Transformer,
 )
-
 from torch.utils.checkpoint import checkpoint
 
 
```
```diff
@@ -1,5 +1,6 @@
 import argparse
 
+
 parser = argparse.ArgumentParser(
     description="comm_mode_feature examples",
     formatter_class=argparse.RawTextHelpFormatter,
```
```diff
@@ -21,6 +21,7 @@ from torch.utils._python_dispatch import TorchDispatchMode
 from torch.utils._pytree import tree_map_only
 from torch.utils.weak import WeakIdKeyDictionary, weakref
 
+
 _TOTAL_KEY = "Total"
 
 __all__ = ["FSDPMemTracker"]
```
```diff
@@ -17,7 +17,6 @@ from typing import (
     TYPE_CHECKING,
     Union,
 )
-
 from typing_extensions import Self
 
 import torch
@@ -32,9 +31,9 @@ from torch.utils._python_dispatch import (
     TorchDispatchMode,
 )
 from torch.utils._pytree import tree_flatten, tree_map_only
-
 from torch.utils.weak import WeakIdKeyDictionary, weakref
 
+
 if TYPE_CHECKING:
     from torch.utils.hooks import RemovableHandle
 
```
```diff
@@ -2,13 +2,13 @@
 import functools
 import time
 from typing import Any, Callable, Dict, List, TypeVar
+from uuid import uuid4
 
 from typing_extensions import ParamSpec
-from uuid import uuid4
 
 import torch.distributed.c10d_logger as c10d_logger
 from torch.distributed.checkpoint.logging_handlers import DCP_LOGGER_NAME
 
 __all__: List[str] = []
 
 global _dcp_logger
```
```diff
@@ -6,7 +6,6 @@ from typing import Optional
 
 import torch
 import torch.distributed as dist
-
 from torch._utils import _get_device_module
 from torch.distributed import distributed_c10d
 from torch.distributed._shard.sharded_tensor import (
```
```diff
@@ -7,6 +7,7 @@ from typing import List, Tuple, Union
 import torch
 from torch import fx
 
+
 logger = logging.getLogger(__name__)
 
 
```
```diff
@@ -71,6 +71,7 @@ derivative would be as follows::
         loss.backward()
 """
 
+from . import transforms
 from .bernoulli import Bernoulli
 from .beta import Beta
 from .binomial import Binomial
@@ -111,12 +112,12 @@ from .relaxed_categorical import RelaxedOneHotCategorical
 from .studentT import StudentT
 from .transformed_distribution import TransformedDistribution
 from .transforms import *  # noqa: F403
-from . import transforms
 from .uniform import Uniform
 from .von_mises import VonMises
 from .weibull import Weibull
 from .wishart import Wishart
 
+
 _add_kl_info()
 del _add_kl_info
 
```
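The first hunk above ends the module docstring, whose surrounding text describes the pathwise derivative. As a small self-contained illustration of that pattern using the public API (the values are arbitrary):

```python
import torch
from torch.distributions import Normal

# rsample() draws a reparameterized sample, so gradients flow from the
# loss back into the distribution's parameters.
loc = torch.zeros(1, requires_grad=True)
scale = torch.ones(1, requires_grad=True)
x = Normal(loc, scale).rsample()
loss = (x ** 2).sum()
loss.backward()
print(loc.grad, scale.grad)  # both populated via the pathwise derivative
```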
```diff
@@ -13,6 +13,7 @@ from torch.distributions.utils import (
 )
 from torch.nn.functional import binary_cross_entropy_with_logits
 
+
 __all__ = ["Bernoulli"]
 
 
```
```diff
@@ -7,6 +7,7 @@ from torch.distributions.dirichlet import Dirichlet
 from torch.distributions.exp_family import ExponentialFamily
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["Beta"]
 
 
```
```diff
@@ -9,6 +9,7 @@ from torch.distributions.utils import (
     probs_to_logits,
 )
 
+
 __all__ = ["Binomial"]
 
 
```
```diff
@@ -5,6 +5,7 @@ from torch.distributions import constraints
 from torch.distributions.distribution import Distribution
 from torch.distributions.utils import lazy_property, logits_to_probs, probs_to_logits
 
+
 __all__ = ["Categorical"]
 
 
```
```diff
@@ -8,6 +8,7 @@ from torch.distributions import constraints
 from torch.distributions.distribution import Distribution
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["Cauchy"]
 
 
```
```diff
@@ -2,6 +2,7 @@
 from torch.distributions import constraints
 from torch.distributions.gamma import Gamma
 
+
 __all__ = ["Chi2"]
 
 
```
```diff
@@ -70,6 +70,7 @@ import numbers
 
 from torch.distributions import constraints, transforms
 
+
 __all__ = [
     "ConstraintRegistry",
     "biject_to",
```
```diff
@@ -34,6 +34,7 @@ The following constraints are implemented:
 
 import torch
 
+
 __all__ = [
     "Constraint",
     "boolean",
```
```diff
@@ -14,6 +14,7 @@ from torch.distributions.utils import (
 )
 from torch.nn.functional import binary_cross_entropy_with_logits
 
+
 __all__ = ["ContinuousBernoulli"]
 
 
```
```diff
@@ -5,6 +5,7 @@ from torch.autograd.function import once_differentiable
 from torch.distributions import constraints
 from torch.distributions.exp_family import ExponentialFamily
 
+
 __all__ = ["Dirichlet"]
 
 
```
```diff
@@ -8,6 +8,7 @@ from torch.distributions import constraints
 from torch.distributions.utils import lazy_property
 from torch.types import _size
 
+
 __all__ = ["Distribution"]
 
 
```
```diff
@@ -2,6 +2,7 @@
 import torch
 from torch.distributions.distribution import Distribution
 
+
 __all__ = ["ExponentialFamily"]
 
 
```
```diff
@@ -6,6 +6,7 @@ from torch.distributions import constraints
 from torch.distributions.exp_family import ExponentialFamily
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["Exponential"]
 
 
```
```diff
@@ -8,6 +8,7 @@ from torch.distributions.distribution import Distribution
 from torch.distributions.gamma import Gamma
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["FisherSnedecor"]
 
 
```
```diff
@@ -6,6 +6,7 @@ from torch.distributions import constraints
 from torch.distributions.exp_family import ExponentialFamily
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["Gamma"]
 
 
```
```diff
@@ -12,6 +12,7 @@ from torch.distributions.utils import (
 )
 from torch.nn.functional import binary_cross_entropy_with_logits
 
+
 __all__ = ["Geometric"]
 
 
```
```diff
@@ -9,6 +9,7 @@ from torch.distributions.transforms import AffineTransform, ExpTransform
 from torch.distributions.uniform import Uniform
 from torch.distributions.utils import broadcast_all, euler_constant
 
+
 __all__ = ["Gumbel"]
 
 
```
```diff
@@ -8,6 +8,7 @@ from torch.distributions.cauchy import Cauchy
 from torch.distributions.transformed_distribution import TransformedDistribution
 from torch.distributions.transforms import AbsTransform
 
+
 __all__ = ["HalfCauchy"]
 
 
```
```diff
@@ -8,6 +8,7 @@ from torch.distributions.normal import Normal
 from torch.distributions.transformed_distribution import TransformedDistribution
 from torch.distributions.transforms import AbsTransform
 
+
 __all__ = ["HalfNormal"]
 
 
```
```diff
@@ -6,6 +6,7 @@ from torch.distributions import constraints
 from torch.distributions.distribution import Distribution
 from torch.distributions.utils import _sum_rightmost
 
+
 __all__ = ["Independent"]
 
 
```
```diff
@@ -37,6 +37,7 @@ from .transformed_distribution import TransformedDistribution
 from .uniform import Uniform
 from .utils import _sum_rightmost, euler_constant as _euler_gamma
 
+
 _KL_REGISTRY: Dict[
     Tuple[Type, Type], Callable
 ] = {}  # Source of truth mapping a few general (type, type) pairs to functions.
```
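`_KL_REGISTRY` above maps `(type, type)` pairs to KL implementations; the public entry point that consults it is `torch.distributions.kl_divergence`. A quick usage sketch:

```python
import torch
from torch.distributions import Normal, kl_divergence

# kl_divergence dispatches on the (type, type) pair registered in _KL_REGISTRY.
p = Normal(torch.zeros(1), torch.ones(1))
q = Normal(torch.ones(1), torch.ones(1))
print(kl_divergence(p, q))  # analytic KL(p || q) between two Normals
```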
```diff
@@ -7,6 +7,7 @@ from torch.distributions.transforms import AffineTransform, PowerTransform
 from torch.distributions.uniform import Uniform
 from torch.distributions.utils import broadcast_all, euler_constant
 
+
 __all__ = ["Kumaraswamy"]
 
 
```
```diff
@@ -6,6 +6,7 @@ from torch.distributions import constraints
 from torch.distributions.distribution import Distribution
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["Laplace"]
 
 
```
```diff
@@ -15,6 +15,7 @@ from torch.distributions import Beta, constraints
 from torch.distributions.distribution import Distribution
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["LKJCholesky"]
 
 
```
```diff
@@ -4,6 +4,7 @@ from torch.distributions.normal import Normal
 from torch.distributions.transformed_distribution import TransformedDistribution
 from torch.distributions.transforms import ExpTransform
 
+
 __all__ = ["LogNormal"]
 
 
```
```diff
@@ -4,6 +4,7 @@ from torch.distributions.normal import Normal
 from torch.distributions.transformed_distribution import TransformedDistribution
 from torch.distributions.transforms import StickBreakingTransform
 
+
 __all__ = ["LogisticNormal"]
 
 
```
```diff
@@ -7,6 +7,7 @@ from torch.distributions.distribution import Distribution
 from torch.distributions.multivariate_normal import _batch_mahalanobis, _batch_mv
 from torch.distributions.utils import _standard_normal, lazy_property
 
+
 __all__ = ["LowRankMultivariateNormal"]
 
 
```
```diff
@@ -5,6 +5,7 @@ import torch
 from torch.distributions import Categorical, constraints
 from torch.distributions.distribution import Distribution
 
+
 __all__ = ["MixtureSameFamily"]
 
 
```
```diff
@@ -6,6 +6,7 @@ from torch.distributions.binomial import Binomial
 from torch.distributions.distribution import Distribution
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["Multinomial"]
 
 
```
```diff
@@ -6,6 +6,7 @@ from torch.distributions import constraints
 from torch.distributions.distribution import Distribution
 from torch.distributions.utils import _standard_normal, lazy_property
 
+
 __all__ = ["MultivariateNormal"]
 
 
```
```diff
@@ -10,6 +10,7 @@ from torch.distributions.utils import (
     probs_to_logits,
 )
 
+
 __all__ = ["NegativeBinomial"]
 
 
```
```diff
@@ -7,6 +7,7 @@ from torch.distributions import constraints
 from torch.distributions.exp_family import ExponentialFamily
 from torch.distributions.utils import _standard_normal, broadcast_all
 
+
 __all__ = ["Normal"]
 
 
```
```diff
@@ -4,6 +4,7 @@ from torch.distributions import constraints
 from torch.distributions.categorical import Categorical
 from torch.distributions.distribution import Distribution
 
+
 __all__ = ["OneHotCategorical", "OneHotCategoricalStraightThrough"]
 
 
```
```diff
@@ -5,6 +5,7 @@ from torch.distributions.transformed_distribution import TransformedDistribution
 from torch.distributions.transforms import AffineTransform, ExpTransform
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["Pareto"]
 
 
```
```diff
@@ -6,6 +6,7 @@ from torch.distributions import constraints
 from torch.distributions.exp_family import ExponentialFamily
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["Poisson"]
 
 
```
```diff
@@ -14,6 +14,7 @@ from torch.distributions.utils import (
     probs_to_logits,
 )
 
+
 __all__ = ["LogitRelaxedBernoulli", "RelaxedBernoulli"]
 
 
```
```diff
@@ -7,6 +7,7 @@ from torch.distributions.transformed_distribution import TransformedDistribution
 from torch.distributions.transforms import ExpTransform
 from torch.distributions.utils import broadcast_all, clamp_probs
 
+
 __all__ = ["ExpRelaxedCategorical", "RelaxedOneHotCategorical"]
 
 
```
```diff
@@ -7,6 +7,7 @@ from torch.distributions import Chi2, constraints
 from torch.distributions.distribution import Distribution
 from torch.distributions.utils import _standard_normal, broadcast_all
 
+
 __all__ = ["StudentT"]
 
 
```
```diff
@@ -8,6 +8,7 @@ from torch.distributions.independent import Independent
 from torch.distributions.transforms import ComposeTransform, Transform
 from torch.distributions.utils import _sum_rightmost
 
+
 __all__ = ["TransformedDistribution"]
 
 
```
```diff
@@ -18,6 +18,7 @@ from torch.distributions.utils import (
 )
 from torch.nn.functional import pad, softplus
 
+
 __all__ = [
     "AbsTransform",
     "AffineTransform",
```
```diff
@@ -7,6 +7,7 @@ from torch.distributions import constraints
 from torch.distributions.distribution import Distribution
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["Uniform"]
 
 
```
```diff
@@ -7,6 +7,7 @@ import torch
 import torch.nn.functional as F
 from torch.overrides import is_tensor_like
 
+
 euler_constant = 0.57721566490153286060  # Euler Mascheroni Constant
 
 __all__ = [
```
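`broadcast_all`, imported throughout the files in this diff, is the helper that coerces distribution parameters to tensors of a common broadcast shape. A small usage sketch:

```python
import torch
from torch.distributions.utils import broadcast_all

# Scalars and tensors are broadcast to one common shape.
loc, scale = broadcast_all(torch.zeros(3, 1), torch.ones(4))
print(loc.shape, scale.shape)  # torch.Size([3, 4]) torch.Size([3, 4])
```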
```diff
@@ -7,6 +7,7 @@ from torch.distributions import constraints
 from torch.distributions.distribution import Distribution
 from torch.distributions.utils import broadcast_all, lazy_property
 
+
 __all__ = ["VonMises"]
 
 
```
```diff
@@ -7,6 +7,7 @@ from torch.distributions.transformed_distribution import TransformedDistribution
 from torch.distributions.transforms import AffineTransform, PowerTransform
 from torch.distributions.utils import broadcast_all
 
+
 __all__ = ["Weibull"]
 
 
```