pytorch/torch/nn/utils/__init__.py
Mikayla Gawarecki 2ee91db03d Add APIs to separate norm calculation and gradient scaling in nn.utils.clip_grad_norm_ (#139662)
Fixes https://github.com/pytorch/pytorch/issues/139467

Refactor `nn.utils.clip_grad_norm_` into `nn.utils.get_total_norm` and `nn.utils.clip_grads_with_norm_`. `clip_grad_norm_` now calls into these two new functions.

`get_total_norm` is generalized to accept any iterable of tensors (rather than a gradient-specific `get_grad_norm`), per the discussion with @awgu on the issue.
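
A minimal usage sketch of the resulting two-step workflow, assuming the `get_total_norm` / `clip_grads_with_norm_` signatures introduced by this PR (torch >= 2.6); the model and tensor shapes are illustrative only:

```python
# Minimal sketch: compute the norm and scale the gradients as separate steps.
import torch
import torch.nn as nn
from torch.nn.utils import clip_grads_with_norm_, get_total_norm

model = nn.Linear(4, 2)  # illustrative toy model
model(torch.randn(8, 4)).sum().backward()

# Step 1: compute the total norm. get_total_norm takes any iterable of
# tensors, not only gradients, which is why it is not named get_grad_norm.
grads = [p.grad for p in model.parameters() if p.grad is not None]
total_norm = get_total_norm(grads, norm_type=2.0)

# Step 2: scale the gradients in place so their total norm is at most max_norm.
clip_grads_with_norm_(model.parameters(), max_norm=1.0, total_norm=total_norm)

# The one-shot clip_grad_norm_ is now equivalent to the two steps above:
# total_norm = nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
```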

Pull Request resolved: https://github.com/pytorch/pytorch/pull/139662
Approved by: https://github.com/H-Huang
2024-11-07 23:13:23 +00:00

from . import parametrizations, rnn, stateless
from .clip_grad import (
    _clip_grads_with_norm_ as clip_grads_with_norm_,
    _get_total_norm as get_total_norm,
    clip_grad_norm,
    clip_grad_norm_,
    clip_grad_value_,
)
from .convert_parameters import parameters_to_vector, vector_to_parameters
from .fusion import (
    fuse_conv_bn_eval,
    fuse_conv_bn_weights,
    fuse_linear_bn_eval,
    fuse_linear_bn_weights,
)
from .init import skip_init
from .memory_format import (
    convert_conv2d_weight_memory_format,
    convert_conv3d_weight_memory_format,
)
from .spectral_norm import remove_spectral_norm, spectral_norm
from .weight_norm import remove_weight_norm, weight_norm

__all__ = [
    "clip_grad_norm",
    "clip_grad_norm_",
    "clip_grads_with_norm_",
    "clip_grad_value_",
    "convert_conv2d_weight_memory_format",
    "convert_conv3d_weight_memory_format",
    "fuse_conv_bn_eval",
    "fuse_conv_bn_weights",
    "fuse_linear_bn_eval",
    "fuse_linear_bn_weights",
    "get_total_norm",
    "parameters_to_vector",
    "parametrizations",
    "remove_spectral_norm",
    "remove_weight_norm",
    "rnn",
    "skip_init",
    "spectral_norm",
    "stateless",
    "vector_to_parameters",
    "weight_norm",
]