Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-06 12:20:52 +01:00
[BE] wrap deprecated function/class with typing_extensions.deprecated (#127689)
Use `typing_extensions.deprecated` to annotate deprecated functions and classes where possible. Otherwise, add `category=FutureWarning` to `warnings.warn("message")` calls that are missing a category.
Note that only warnings whose messages contain `[Dd]eprecat(ed|ion)` are updated in this PR.
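For reference, a minimal sketch of the two patterns this PR applies throughout the tree; the helper names `old_helper`, `new_helper`, and `legacy_entry_point` below are hypothetical and not taken from the diff itself.

```python
import warnings

from typing_extensions import deprecated


def new_helper(x):
    return x * 2


# Pattern 1: wrap the deprecated callable with typing_extensions.deprecated,
# which emits the warning (here a FutureWarning) whenever it is called.
@deprecated(
    "`old_helper` is deprecated, please use `new_helper` instead",
    category=FutureWarning,
)
def old_helper(x):
    return new_helper(x)


# Pattern 2: where a decorator does not fit, keep the existing warnings.warn
# call but pass an explicit FutureWarning category (and a stacklevel).
def legacy_entry_point(x):
    warnings.warn(
        "`legacy_entry_point` is deprecated, please use `new_helper` instead",
        FutureWarning,
        stacklevel=2,
    )
    return new_helper(x)
```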
Resolves #126888
This PR is split from PR #126898.
------
Pull Request resolved: https://github.com/pytorch/pytorch/pull/127689
Approved by: https://github.com/Skylion007
Parent: c1dd3a615f
Commit: 67ef2683d9
.github/requirements/conda-env-Linux-X64.txt (vendored, 2 lines changed)
@@ -5,4 +5,4 @@ ninja=1.10.2
 numpy=1.23.3
 pyyaml=6.0
 setuptools=68.2.2
-typing-extensions=4.3.0
+typing-extensions=4.9.0
.github/requirements/conda-env-iOS.txt (vendored, 2 lines changed)
@@ -4,4 +4,4 @@ ninja=1.10.2
 numpy=1.23.3
 pyyaml=6.0
 setuptools=68.2.2
-typing-extensions=4.3.0
+typing-extensions=4.9.0
.github/requirements/conda-env-macOS-ARM64 (vendored, 2 lines changed)
@@ -2,7 +2,7 @@ numpy=1.22.3
 pyyaml=6.0
 setuptools=61.2.0
 cmake=3.22.*
-typing-extensions=4.3.0
+typing-extensions=4.9.0
 dataclasses=0.8
 pip=22.2.2
 pillow=10.0.1
.github/requirements/conda-env-macOS-X64 (vendored, 2 lines changed)
@@ -4,7 +4,7 @@ numpy=1.21.2
 pyyaml=5.3
 setuptools=46.0.0
 cmake=3.22.*
-typing-extensions=4.3.0
+typing-extensions=4.9.0
 dataclasses=0.8
 pip=22.2.2
 pillow=10.0.1
@@ -237,7 +237,7 @@ class DTensorAPITest(DTensorTestBase):
             assert isinstance(outputs, DTensor)
             return outputs.to_local()

-        with self.assertWarnsRegex(UserWarning, "Deprecating"):
+        with self.assertWarnsRegex(FutureWarning, "Deprecating"):
             replica_module = distribute_module(
                 module_to_replicate,
                 device_mesh,
@@ -1436,7 +1436,7 @@ class TestFSDPOptimState(FSDPTest):
         def get_warning_context():
             warning_regex = "`optim_input` argument is deprecated"
             return self.assertWarnsRegex(
-                expected_warning=UserWarning, expected_regex=warning_regex
+                expected_warning=FutureWarning, expected_regex=warning_regex
             )

         self._run_on_all_optim_state_apis(
@@ -3258,7 +3258,7 @@ class TestComposability(TestCase):
         x = torch.randn(3, device=device)

         # functorch version of the API is deprecated
-        with self.assertWarnsRegex(UserWarning, "Please use torch.vmap"):
+        with self.assertWarnsRegex(FutureWarning, "Please use `torch.vmap`"):
             vmap(torch.sin)

         # the non-functorch version is not deprecated

@@ -3276,7 +3276,9 @@ class TestComposability(TestCase):
         new_api = getattr(torch.func, transform)

         # functorch version of the API is deprecated
-        with self.assertWarnsRegex(UserWarning, f"Please use torch.func.{transform}"):
+        with self.assertWarnsRegex(
+            FutureWarning, f"Please use `torch.func.{transform}`"
+        ):
             api(torch.sin)

         # the non-functorch version is not deprecated
@@ -521,7 +521,7 @@ class TestNNInit(TestCase):
             init.normal(x)

         with self.assertWarnsRegex(
-            UserWarning,
+            FutureWarning,
             "deprecated",
             msg="methods not suffixed with underscore should be deprecated",
         ):
@@ -1387,7 +1387,8 @@ class TestModuleHookNN(NNTestCase):
         m.register_backward_hook(noop)

         with self.assertWarnsRegex(
-            UserWarning, "does not take as input a single Tensor or a tuple of Tensors"
+            FutureWarning,
+            "does not take as input a single Tensor or a tuple of Tensors",
         ):
             m([a, b])

@@ -1400,7 +1401,7 @@ class TestModuleHookNN(NNTestCase):
         m.register_backward_hook(noop)

         with self.assertWarnsRegex(
-            UserWarning, "does not return a single Tensor or a tuple of Tensors"
+            FutureWarning, "does not return a single Tensor or a tuple of Tensors"
         ):
             m(a, b)

@@ -1413,7 +1414,7 @@ class TestModuleHookNN(NNTestCase):
         m.register_backward_hook(noop)

         with self.assertWarnsRegex(
-            UserWarning, "outputs are generated by different autograd Nodes"
+            FutureWarning, "outputs are generated by different autograd Nodes"
         ):
             m(a, b)

@@ -1426,7 +1427,7 @@ class TestModuleHookNN(NNTestCase):
         m.register_backward_hook(noop)

         with self.assertWarnsRegex(
-            UserWarning, "the forward contains multiple autograd Nodes"
+            FutureWarning, "the forward contains multiple autograd Nodes"
         ):
             m(a)
@@ -255,8 +255,8 @@ class TestAutocastCPU(TestCase):

     def test_cpu_autocast_deprecated_warning(self):
         with self.assertWarnsRegex(
-            DeprecationWarning,
-            r"torch.cpu.amp.autocast\(args...\) is deprecated. Please use torch.amp.autocast\('cpu', args...\) instead.",
+            FutureWarning,
+            r"`torch.cpu.amp.autocast\(args...\)` is deprecated. Please use `torch.amp.autocast\('cpu', args...\)` instead.",
         ):
             with torch.cpu.amp.autocast():
                 _ = torch.ones(10)
@@ -154,7 +154,7 @@ class TestAutograd(TestCase):

     def test_grad_mode_class_decoration(self):
         # Decorating class is deprecated and should not be used
-        with self.assertWarnsRegex(UserWarning, "Decorating classes is deprecated"):
+        with self.assertWarnsRegex(FutureWarning, "Decorating classes is deprecated"):

             @torch.no_grad()
             class Foo:

@@ -5937,13 +5937,13 @@ Done""",
         b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)

         with self.assertWarnsRegex(
-            UserWarning, "get_numerical_jacobian was part of PyTorch's private API"
+            FutureWarning, "`get_numerical_jacobian` was part of PyTorch's private API"
         ):
             jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
         self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))

         with self.assertWarnsRegex(
-            UserWarning, "get_numerical_jacobian was part of PyTorch's private API"
+            FutureWarning, "`get_numerical_jacobian` was part of PyTorch's private API"
         ):
             jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
         self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))

@@ -5963,7 +5963,7 @@ Done""",

         outputs = fn(a, b)
         with self.assertWarnsRegex(
-            UserWarning, "get_analytical_jacobian was part of PyTorch's private API"
+            FutureWarning, "`get_analytical_jacobian` was part of PyTorch's private API"
         ):
             (
                 jacobians,

@@ -5991,7 +5991,7 @@ Done""",

         outputs = NonDetFunc.apply(a, 1e-6)
         with self.assertWarnsRegex(
-            UserWarning, "get_analytical_jacobian was part of PyTorch's private API"
+            FutureWarning, "`get_analytical_jacobian` was part of PyTorch's private API"
         ):
             (
                 jacobians,
@@ -1820,10 +1820,10 @@ torch.cuda.synchronize()
                 return grad, grad

         self.assertRegex(
-            str(w[0].message), r"torch.cuda.amp.custom_fwd\(args...\) is deprecated."
+            str(w[0].message), r"`torch.cuda.amp.custom_fwd\(args...\)` is deprecated."
         )
         self.assertRegex(
-            str(w[1].message), r"torch.cuda.amp.custom_bwd\(args...\) is deprecated."
+            str(w[1].message), r"`torch.cuda.amp.custom_bwd\(args...\)` is deprecated."
         )

         mymm = MyMM.apply

@@ -2016,8 +2016,8 @@ torch.cuda.synchronize()

     def test_cuda_autocast_deprecated_warning(self):
         with self.assertWarnsRegex(
-            DeprecationWarning,
-            r"torch.cuda.amp.autocast\(args...\) is deprecated. Please use torch.amp.autocast\('cuda', args...\) instead.",
+            FutureWarning,
+            r"`torch.cuda.amp.autocast\(args...\)` is deprecated. Please use `torch.amp.autocast\('cuda', args...\)` instead.",
         ):
             with torch.cuda.amp.autocast():
                 _ = torch.ones(10)
@@ -338,7 +338,7 @@ $1: f32[2] = torch._ops.prims.sin.default($0)""")
             prims.mul(torch.randn(2), 1 + 1j)

     def test_check_deprecation_warning(self):
-        with self.assertWarnsRegex(DeprecationWarning, 'will be removed in the future'):
+        with self.assertWarnsRegex(FutureWarning, 'will be removed in the future'):
             torch._prims_common.check(True, lambda: 'message')

@@ -723,7 +723,7 @@ class TestPythonPytree(TestCase):
                 self.y = y

         with self.assertWarnsRegex(
-            UserWarning, "torch.utils._pytree._register_pytree_node"
+            FutureWarning, "torch.utils._pytree._register_pytree_node"
         ):
             py_pytree._register_pytree_node(
                 DummyType,
@@ -901,7 +901,7 @@ exit(len(w))
         m = torch.nn.Linear(1, 1)
         params = dict(m.named_parameters())
         x = torch.randn(3, 1)
-        with self.assertWarnsRegex(UserWarning, "Please use torch.func.functional_call"):
+        with self.assertWarnsRegex(FutureWarning, "Please use `torch.func.functional_call`"):
             stateless.functional_call(m, params, x)

 class TestPythonOptimizeMode(TestCase):
@@ -6198,8 +6198,8 @@ else:
         GradScaler = torch.cuda.amp.GradScaler if "cuda" == device.type else torch.cpu.amp.GradScaler

         with self.assertWarnsRegex(
-            UserWarning,
-            rf"torch.{device.type}.amp.GradScaler\(args...\) is deprecated.",
+            FutureWarning,
+            rf"`torch.{device.type}.amp.GradScaler\(args...\)` is deprecated.",
         ):
             _ = GradScaler(init_scale=2.0)

@@ -1996,17 +1996,6 @@ from torch import func as func
 from torch.func import vmap


-# The function _sparse_coo_tensor_unsafe is removed from PyTorch
-# Python API (v. 1.13), here we temporarily provide its replacement
-# with a deprecation warning.
-# TODO: remove the function for PyTorch v 1.15.
-def _sparse_coo_tensor_unsafe(*args, **kwargs):
-    import warnings
-    warnings.warn('torch._sparse_coo_tensor_unsafe is deprecated, '
-                  'use torch.sparse_coo_tensor(..., check_invariants=False) instead.')
-    kwargs['check_invariants'] = False
-    return torch.sparse_coo_tensor(*args, **kwargs)
-
 # Register MPS specific decomps
 torch.backends.mps._init()

@@ -798,7 +798,9 @@ def explain(f, *extra_args, **extra_kwargs):
             warnings.warn(
                 "explain(f, *args, **kwargs) is deprecated, use explain(f)(*args, **kwargs) instead. "
                 "If you don't migrate, we may break your explain call in the future if your user defined kwargs "
-                "conflict with future kwargs added to explain(f)."
+                "conflict with future kwargs added to explain(f).",
+                FutureWarning,
+                stacklevel=2,
             )
             return inner(*extra_args, **extra_kwargs)
         else:

@@ -941,7 +943,7 @@ def check_signature_rewritable(graph):
         tb = "".join(traceback.format_list(stack))
         extra = ""
         if len(user_stacks) > 1:
-            extra = f"(elided {len(user_stacks)-1} more accesses)"
+            extra = f"(elided {len(user_stacks) - 1} more accesses)"
         msg = f"{source.name()}, accessed at:\n{tb}{extra}"
         # TODO: option to print ALL of the stack traces at once
         input_errors.append(msg)

@@ -1476,7 +1478,9 @@ def export(
             warnings.warn(
                 "export(f, *args, **kwargs) is deprecated, use export(f)(*args, **kwargs) instead. "
                 "If you don't migrate, we may break your export call in the future if your user defined kwargs "
-                "conflict with future kwargs added to export(f)."
+                "conflict with future kwargs added to export(f).",
+                FutureWarning,
+                stacklevel=2,
             )
             return inner(*extra_args, **extra_kwargs)
         else:
@@ -1,3 +1,12 @@
+"""
+The APIs in this file are exposed as `functorch.*`. They are thin wrappers
+around the torch.func.* APIs that have deprecation warnings -- we're trying
+to move people to the torch.func.* equivalents.
+
+NB: We don't use *args, **kwargs in the signatures because that changes the
+documentation.
+"""
+
 import textwrap
 import warnings
 from typing import Any, Callable, Optional, Tuple, Union

@@ -9,25 +18,16 @@ import torch.nn as nn
 from torch._functorch.eager_transforms import argnums_t
 from torch._functorch.vmap import in_dims_t, out_dims_t

-"""
-The APIs in this file are exposed as `functorch.*`. They are thin wrappers
-around the torch.func.* APIs that have deprecation warnings -- we're trying
-to move people to the torch.func.* equivalents.
-
-NB: We don't use *args, **kwargs in the signatures because that changes the
-documentation.
-"""
-

 def get_warning(api, new_api=None, replace_newlines=False):
     if new_api is None:
         new_api = f"torch.func.{api}"
     warning = (
         f"We've integrated functorch into PyTorch. As the final step of the \n"
-        f"integration, functorch.{api} is deprecated as of PyTorch \n"
+        f"integration, `functorch.{api}` is deprecated as of PyTorch \n"
         f"2.0 and will be deleted in a future version of PyTorch >= 2.3. \n"
-        f"Please use {new_api} instead; see the PyTorch 2.0 release notes \n"
-        f"and/or the torch.func migration guide for more details \n"
+        f"Please use `{new_api}` instead; see the PyTorch 2.0 release notes \n"
+        f"and/or the `torch.func` migration guide for more details \n"
         f"https://pytorch.org/docs/main/func.migrating.html"
     )
     if replace_newlines:

@@ -37,7 +37,7 @@ def get_warning(api, new_api=None, replace_newlines=False):

 def warn_deprecated(api, new_api=None):
     warning = get_warning(api, new_api, replace_newlines=True)
-    warnings.warn(warning, stacklevel=2)
+    warnings.warn(warning, FutureWarning, stacklevel=3)


 def setup_docs(functorch_api, torch_func_api=None, new_api_name=None):
@@ -16,7 +16,8 @@ __all__ = ["tree_map_", "treespec_pprint"]
 with warnings.catch_warnings():
     warnings.simplefilter("always")
     warnings.warn(
-        "torch._functorch.pytree_hacks is deprecated and will be removed in a future release. "
-        "Please use torch.utils._pytree instead.",
+        "`torch._functorch.pytree_hacks` is deprecated and will be removed in a future release. "
+        "Please `use torch.utils._pytree` instead.",
         DeprecationWarning,
+        stacklevel=2,
     )
@@ -2750,7 +2750,8 @@ class FixedLayout(Layout):
         """A closure containing math to read a given element"""

         def indexer(index):
-            assert len(index) == len(self.stride) == len(self.size)
+            assert len(index) == len(self.stride)
+            assert len(index) == len(self.size)
             result = self.offset
             for idx, stride, sz in zip(index, self.stride, self.size):
                 if sz != 1:
@@ -1,7 +1,7 @@
 import contextlib
 import functools
-import warnings
 from typing import Callable, Optional
+from typing_extensions import deprecated

 import torch
 from torch._library.utils import Kernel, RegistrationHandle

@@ -124,10 +124,11 @@ class AbstractImplCtx:
         self._shape_env = _fake_mode.shape_env
         self._op = _op

+    @deprecated(
+        "`create_unbacked_symint` is deprecated, please use `new_dynamic_size` instead",
+        category=FutureWarning,
+    )
     def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
-        warnings.warn(
-            "create_unbacked_symint is deprecated, please use new_dynamic_size instead"
-        )
         return self.new_dynamic_size(min=min, max=max)

     def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
@@ -21,7 +21,7 @@ from typing import (
     TYPE_CHECKING,
     Union,
 )
-from typing_extensions import TypeAlias
+from typing_extensions import deprecated, TypeAlias


 if TYPE_CHECKING:

@@ -1789,6 +1789,11 @@ def check_in_bounds_for_storage(
 # NOTE: This function should ideally be removed, but some Meta internal models
 # packaged with `torch.package` are using it, so it will have to be removed
 # at some point in the future when those models no longer use this function.
+@deprecated(
+    "`torch._prims_common.check` is deprecated and will be removed in the future. "
+    "Please use `torch._check*` functions instead.",
+    category=FutureWarning,
+)
 def check(
     b: bool, s: Callable[[], str], exc_type: Type[Exception] = RuntimeError
 ) -> None:

@@ -1801,12 +1806,6 @@ def check(
     .. note:: This function is planned for removal in the future. Please use
         `torch._check*` functions instead.
     """
-    warnings.warn(
-        DeprecationWarning(
-            "'torch._prims_common.check' will be removed in the future. Please use "
-            "'torch._check*' functions instead"
-        )
-    )
     torch._check_with(exc_type, b, s)

@@ -1,6 +1,6 @@
 import functools
-import warnings
 from typing import Any, Callable, List, Optional, Tuple, Union
+from typing_extensions import deprecated

 import torch
 from torch import Tensor

@@ -190,14 +190,14 @@ def _get_name(func: Callable):
 # vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors,
 # sends those into func, and then unwraps the output BatchedTensors. Operations
 # on BatchedTensors perform the batched operations that the user is asking for.
+@deprecated(
+    "Please use `torch.vmap` instead of `torch._vmap_internals.vmap`.",
+    category=FutureWarning,
+)
 def vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0) -> Callable:
     """
     Please use torch.vmap instead of this API.
     """
-    warnings.warn(
-        "Please use torch.vmap instead of torch._vmap_internals.vmap. ",
-        stacklevel=2,
-    )
     return _vmap(func, in_dims, out_dims)

@@ -224,7 +224,6 @@ class MultiheadAttention(nn.MultiheadAttention):

         return fp

-
     @classmethod
     def from_observed(cls, other):
         # The whole flow is float -> observed -> quantized

@@ -336,7 +335,11 @@ class MultiheadAttention(nn.MultiheadAttention):

         if attn_mask is not None:
             if attn_mask.dtype == torch.uint8:
-                warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
+                warnings.warn(
+                    "Byte tensor for `attn_mask` in `nn.MultiheadAttention` is deprecated. "
+                    "Use bool tensor instead.",
+                    stacklevel=3,
+                )
                 attn_mask = attn_mask.to(torch.bool)
             assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \
                 f'Only float and bool types are supported for attn_mask, not {attn_mask.dtype}'

@@ -354,7 +357,11 @@ class MultiheadAttention(nn.MultiheadAttention):

         # convert ByteTensor key_padding_mask to bool
         if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
-            warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
+            warnings.warn(
+                "Byte tensor for `key_padding_mask` in `nn.MultiheadAttention` is deprecated. "
+                "Use bool tensor instead.",
+                stacklevel=3,
+            )
             key_padding_mask = key_padding_mask.to(torch.bool)
         if self.bias_k is not None and self.bias_v is not None:
             if static_k is None and static_v is None:
@@ -1,5 +1,6 @@
 import numbers
 import warnings
+from typing_extensions import deprecated

 import torch
 import torch.nn as nn

@@ -16,8 +17,11 @@ def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Ten
     return tensor.index_select(dim, permutation)


+@deprecated(
+    "`apply_permutation` is deprecated, please use `tensor.index_select(dim, permutation)` instead",
+    category=FutureWarning,
+)
 def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
-    warnings.warn("apply_permutation is deprecated, please use tensor.index_select(dim, permutation) instead")
     return _apply_permutation(tensor, permutation, dim)

@@ -949,24 +949,33 @@ def convert(
     if convert_custom_config is None:
         convert_custom_config = ConvertCustomConfig()

-    if isinstance(convert_custom_config, Dict):
+    if isinstance(convert_custom_config, dict):
         warnings.warn(
             "Passing a convert_custom_config_dict to convert is deprecated and will not be supported "
-            "in a future version. Please pass in a ConvertCustomConfig instead.")
+            "in a future version. Please pass in a ConvertCustomConfig instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)

-    if isinstance(qconfig_mapping, Dict):
+    if isinstance(qconfig_mapping, dict):
         warnings.warn(
             "Passing a QConfig dictionary to convert is deprecated and will not be supported "
-            "in a future version. Please pass in a QConfigMapping instead.")
+            "in a future version. Please pass in a QConfigMapping instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping) if qconfig_mapping else None
     qconfig_mapping = copy.deepcopy(qconfig_mapping)
     assert qconfig_mapping is None or isinstance(qconfig_mapping, QConfigMapping)

-    if isinstance(backend_config, Dict):
+    if isinstance(backend_config, dict):
         warnings.warn(
             "Passing a backend_config_dict to prepare is deprecated and will not be supported "
-            "in a future version. Please pass in a BackendConfig instead.")
+            "in a future version. Please pass in a BackendConfig instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         backend_config = BackendConfig.from_dict(backend_config)

     if backend_config is None:
@@ -52,16 +52,22 @@ def fuse(
     if fuse_custom_config is None:
         fuse_custom_config = FuseCustomConfig()

-    if isinstance(fuse_custom_config, Dict):
+    if isinstance(fuse_custom_config, dict):
         warnings.warn(
             "Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported "
-            "in a future version. Please pass in a FuseCustomConfig instead.")
+            "in a future version. Please pass in a FuseCustomConfig instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)

-    if isinstance(backend_config, Dict):
+    if isinstance(backend_config, dict):
         warnings.warn(
             "Passing a backend_config_dict to prepare is deprecated and will not be supported "
-            "in a future version. Please pass in a BackendConfig instead.")
+            "in a future version. Please pass in a BackendConfig instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         backend_config = BackendConfig.from_dict(backend_config)

     named_modules = dict(model.named_modules())
@@ -1749,28 +1749,40 @@ def prepare(
     if _equalization_config is None:
         _equalization_config = QConfigMapping()

-    if isinstance(qconfig_mapping, Dict):
+    if isinstance(qconfig_mapping, dict):
         warnings.warn(
             "Passing a QConfig dictionary to prepare is deprecated and will not be supported "
-            "in a future version. Please pass in a QConfigMapping instead.")
+            "in a future version. Please pass in a QConfigMapping instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping)

-    if isinstance(_equalization_config, Dict):
+    if isinstance(_equalization_config, dict):
         warnings.warn(
             "Passing a QConfig dictionary to prepare for equalization is deprecated and will not "
-            "be supported in a future version. Please pass in a QConfigMapping instead.")
+            "be supported in a future version. Please pass in a QConfigMapping instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         _equalization_config = QConfigMapping.from_dict(_equalization_config)

-    if isinstance(prepare_custom_config, Dict):
+    if isinstance(prepare_custom_config, dict):
         warnings.warn(
             "Passing a prepare_custom_config_dict to prepare is deprecated and will not be supported "
-            "in a future version. Please pass in a PrepareCustomConfig instead.")
+            "in a future version. Please pass in a PrepareCustomConfig instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config)

-    if isinstance(backend_config, Dict):
+    if isinstance(backend_config, dict):
         warnings.warn(
             "Passing a backend_config_dict to prepare is deprecated and will not be supported "
-            "in a future version. Please pass in a BackendConfig instead.")
+            "in a future version. Please pass in a BackendConfig instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         backend_config = BackendConfig.from_dict(backend_config)

     assert isinstance(qconfig_mapping, QConfigMapping)
@@ -1,5 +1,6 @@
 from collections import namedtuple
 from typing import Optional, Any, Union, Type
+from typing_extensions import deprecated

 import torch
 import torch.nn as nn

@@ -106,6 +107,10 @@ class QConfig(namedtuple('QConfig', ['activation', 'weight'])):
         return super().__new__(cls, activation, weight)


+@deprecated(
+    "`QConfigDynamic` is going to be deprecated in PyTorch 1.12, please use `QConfig` instead",
+    category=FutureWarning,
+)
 class QConfigDynamic(namedtuple('QConfigDynamic', ['activation', 'weight'])):
     """
     Describes how to dynamically quantize a layer or a part of the network by providing

@@ -127,7 +132,6 @@ class QConfigDynamic(namedtuple('QConfigDynamic', ['activation', 'weight'])):
         if isinstance(weight, nn.Module):
             raise ValueError("QConfigDynamic received observer instance, please pass observer class instead. " +
                              "Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
-        warnings.warn("QConfigDynamic is going to be deprecated in PyTorch 1.12, please use QConfig instead")
         return super().__new__(cls, activation, weight)

@@ -422,16 +426,20 @@ _default_quint8_placeholder_qconfig = QConfig(
     weight=None,
 )

+@deprecated(
+    "`torch.ao.quantization.get_default_qconfig_dict` is deprecated and will be removed in "
+    "a future version. Please use `torch.ao.quantization.get_default_qconfig_mapping` instead.",
+    category=FutureWarning,
+)
 def get_default_qconfig_dict(backend='x86', version=0):
-    warnings.warn(
-        "torch.ao.quantization.get_default_qconfig_dict is deprecated and will be removed in "
-        "a future version. Please use torch.ao.quantization.get_default_qconfig_mapping instead.")
     return torch.ao.quantization.get_default_qconfig_mapping(backend, version).to_dict()

+@deprecated(
+    "`torch.ao.quantization.get_default_qat_qconfig_dict` is deprecated and will be removed in "
+    "a future version. Please use `torch.ao.quantization.get_default_qat_qconfig_mapping` instead.",
+    category=FutureWarning,
+)
 def get_default_qat_qconfig_dict(backend='x86', version=1):
-    warnings.warn(
-        "torch.ao.quantization.get_default_qat_qconfig_dict is deprecated and will be removed in "
-        "a future version. Please use torch.ao.quantization.get_default_qat_qconfig_mapping instead.")
     return torch.ao.quantization.get_default_qat_qconfig_mapping(backend, version).to_dict()

 def _assert_valid_qconfig(qconfig: Optional[QConfig],
@@ -117,10 +117,13 @@ forward graph of the parent module,
     if _equalization_config is None:
         _equalization_config = QConfigMapping()

-    if isinstance(prepare_custom_config, Dict):
+    if isinstance(prepare_custom_config, dict):
         warnings.warn(
             "Passing a prepare_custom_config_dict to prepare is deprecated and will not be supported "
-            "in a future version. Please pass in a PrepareCustomConfig instead.")
+            "in a future version. Please pass in a PrepareCustomConfig instead.",
+            FutureWarning,
+            stacklevel=3,
+        )
         prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config)

     # swap FloatFunctional with FXFloatFunctional

@@ -222,10 +225,13 @@ def fuse_fx(
     if fuse_custom_config is None:
         fuse_custom_config = FuseCustomConfig()

-    if isinstance(fuse_custom_config, Dict):
+    if isinstance(fuse_custom_config, dict):
         warnings.warn(
             "Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported "
-            "in a future version. Please pass in a FuseCustomConfig instead.")
+            "in a future version. Please pass in a FuseCustomConfig instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)

     torch._C._log_api_usage_once("quantization_api.quantize_fx.fuse_fx")

@@ -511,10 +517,13 @@ def _convert_fx(
     if convert_custom_config is None:
         convert_custom_config = ConvertCustomConfig()

-    if isinstance(convert_custom_config, Dict):
+    if isinstance(convert_custom_config, dict):
         warnings.warn(
             "Passing a convert_custom_config_dict to convert is deprecated and will not be supported "
-            "in a future version. Please pass in a ConvertCustomConfig instead.")
+            "in a future version. Please pass in a ConvertCustomConfig instead.",
+            FutureWarning,
+            stacklevel=3,
+        )
         convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)

     _check_is_graph_module(graph_module)
@@ -252,17 +252,21 @@ def backward(
         )

     if grad_variables is not None:
-        warnings.warn("'grad_variables' is deprecated. Use 'grad_tensors' instead.")
+        warnings.warn(
+            "`grad_variables` is deprecated. Use `grad_tensors` instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
         if grad_tensors is None:
             grad_tensors = grad_variables
         else:
             raise RuntimeError(
-                "'grad_tensors' and 'grad_variables' (deprecated) "
-                "arguments both passed to backward(). Please only "
-                "use 'grad_tensors'."
+                "`grad_tensors` and `grad_variables` (deprecated) "
+                "arguments both passed to `backward()`. Please only "
+                "use `grad_tensors`."
             )
     if inputs is not None and len(inputs) == 0:
-        raise RuntimeError("'inputs' argument to backward() cannot be empty.")
+        raise RuntimeError("`inputs` argument to `backward()` cannot be empty.")

     tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tuple(tensors)
     inputs = (

@@ -395,7 +399,9 @@ def grad(
         warnings.warn(
             "only_inputs argument is deprecated and is ignored now "
             "(defaults to True). To accumulate gradient for other "
-            "parts of the graph, please use torch.autograd.backward."
+            "parts of the graph, please use torch.autograd.backward.",
+            FutureWarning,
+            stacklevel=2,
         )

     grad_outputs_ = _tensor_or_tensors_to_tuple(grad_outputs, len(t_outputs))
@@ -1,6 +1,6 @@
 import operator
-import warnings
 from functools import reduce
+from typing_extensions import deprecated

 import torch
 import torch._utils

@@ -9,11 +9,12 @@ from ..function import Function

 class Type(Function):
     @staticmethod
+    @deprecated(
+        "`torch.autograd._functions.Type` is deprecated as of PyTorch 2.1, "
+        "please use `torch.tensor.to(dtype=dtype)` instead.",
+        category=FutureWarning,
+    )
     def forward(ctx, i, dest_type):
-        warnings.warn(
-            "torch.autograd._functions.Type is deprecated as of PyTorch 2.1, please use "
-            "torch.tensor.to(dtype=dtype) instead."
-        )
         ctx.input_type = type(i)
         ctx.input_device = -1 if not i.is_cuda else i.get_device()
         return i.type(dest_type)
@@ -4,6 +4,7 @@ import itertools
 import warnings
 from collections import OrderedDict
 from typing import Any, List, Optional, Tuple
+from typing_extensions import deprecated

 import torch
 import torch._C as _C

@@ -179,12 +180,14 @@ class FunctionCtx:
         """
         self.dirty_tensors = args

+    @deprecated(
+        "`mark_shared_storage` is deprecated. "
+        "Tensors with shared storages are automatically tracked. "
+        "Note that calls to `set_()` are not tracked",
+        category=FutureWarning,
+    )
     def mark_shared_storage(self, *pairs):
-        warnings.warn(
-            "mark_shared_storage is deprecated. "
-            "Tensors with shared storages are automatically tracked. Note "
-            "that calls to `set_()` are not tracked"
-        )
         pass

     def mark_non_differentiable(self, *args: torch.Tensor):
         r"""Mark outputs as non-differentiable.

@@ -491,9 +494,8 @@ class Function(_SingleLevelFunction):
     """

     def __init__(self, *args, **kwargs):
-        cls = self.__class__
         warnings.warn(
-            f"{cls} should not be instantiated. Methods on autograd functions"
+            f"{self.__class__} should not be instantiated. Methods on autograd functions"
             "are all static, so you should invoke them on the class itself. "
             "Instantiating an autograd function will raise an "
            "error in a future version of PyTorch.",
@@ -3,6 +3,7 @@ import functools
 import warnings
 from itertools import product
 from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
+from typing_extensions import deprecated

 import torch
 import torch.testing

@@ -306,6 +307,14 @@ def _get_numerical_jacobian(
     return jacobians


+@deprecated(
+    "`get_numerical_jacobian` was part of PyTorch's private API and not "
+    "meant to be exposed. We are deprecating it and it will be removed "
+    "in a future version of PyTorch. If you have a specific use for "
+    "this or feature request for this to be a stable API, please file "
+    "us an issue at https://github.com/pytorch/pytorch/issues/new",
+    category=FutureWarning,
+)
 def get_numerical_jacobian(fn, inputs, target=None, eps=1e-3, grad_out=1.0):
     """Compute the numerical Jacobian for a given fn and its inputs.

@@ -325,13 +334,6 @@ def get_numerical_jacobian(fn, inputs, target=None, eps=1e-3, grad_out=1.0):
     Note that `target` may not even be part of `input` to `fn`, so please be
     **very careful** in this to not clone `target`.
     """
-    warnings.warn(
-        "get_numerical_jacobian was part of PyTorch's private API and not "
-        "meant to be exposed. We are deprecating it and it will be removed "
-        "in a future version of PyTorch. If you have a specific use for "
-        "this or feature request for this to be a stable API, please file "
-        "us an issue at https://github.com/pytorch/pytorch/issues/new"
-    )
     if (
         grad_out != 1.0
     ):  # grad_out param is only kept for backward compatibility reasons

@@ -818,16 +820,17 @@ def _get_analytical_vJu_backward_mode(
     return reduced_jacobians


+@deprecated(
+    "`get_analytical_jacobian` was part of PyTorch's private API and not "
+    "meant to be exposed. We are deprecating it and it will be removed "
+    "in a future version of PyTorch. If you have a specific use for "
+    "this or feature request for this to be a stable API, please file "
+    "us an issue at https://github.com/pytorch/pytorch/issues/new",
+    category=FutureWarning,
+)
 def get_analytical_jacobian(inputs, output, nondet_tol=0.0, grad_out=1.0):
     # Replicates the behavior of the old get_analytical_jacobian before the refactor
     # This shares much of its code with _check_analytical_jacobian_attributes
-    warnings.warn(
-        "get_analytical_jacobian was part of PyTorch's private API and not "
-        "meant to be exposed. We are deprecating it and it will be removed "
-        "in a future version of PyTorch. If you have a specific use for "
-        "this or feature request for this to be a stable API, please file "
-        "us an issue at https://github.com/pytorch/pytorch/issues/new"
-    )
     if (
         grad_out != 1.0
     ):  # grad_out param is only kept for backward compatibility reasons
@@ -213,7 +213,10 @@ class profile:
         self.use_cuda = use_cuda
         if self.use_cuda:
             warn(
-                "The attribute `use_cuda` will be deprecated soon, please use ``use_device = 'cuda'`` instead."
+                "The attribute `use_cuda` will be deprecated soon, "
+                "please use ``use_device = 'cuda'`` instead.",
+                FutureWarning,
+                stacklevel=2,
             )
             self.use_device: Optional[str] = "cuda"
         else:
@@ -1,5 +1,6 @@
 import itertools
-from warnings import warn
+import warnings
+from typing_extensions import deprecated

 import torch
 import torch.cuda

@@ -23,6 +24,11 @@ from torch.autograd.profiler_util import (
 __all__ = ["profile"]


+@deprecated(
+    "`torch.autograd.profiler_legacy.profile` is deprecated and will be removed in a future release. "
+    "Please use `torch.profiler` instead.",
+    category=None,  # TODO: change to `FutureWarning`
+)
 class profile:
     """DEPRECATED: use torch.profiler instead."""

@@ -51,7 +57,10 @@ class profile:
         self.with_modules = with_modules

         if self.use_cuda and not torch.cuda.is_available():
-            warn("CUDA is not available, disabling CUDA profiling")
+            warnings.warn(
+                "CUDA is not available, disabling CUDA profiling",
+                stacklevel=2,
+            )
             self.use_cuda = False

         if self.use_cuda:
@@ -6,6 +6,7 @@ from collections import defaultdict, namedtuple
 from operator import attrgetter

 from typing import Any, Dict, List, Optional, Tuple
+from typing_extensions import deprecated

 import torch
 from torch.autograd import DeviceType

@@ -415,6 +416,10 @@ class FormattedTimesMixin:
         return 0.0 if self.count == 0 else 1.0 * self.device_time_total / self.count  # type: ignore[attr-defined]

     @property
+    @deprecated(
+        "`cuda_time` is deprecated, please use `device_time` instead.",
+        category=FutureWarning,
+    )
     def cuda_time(self):  # To be deprecated
         return self.device_time

@@ -538,8 +543,12 @@ class FunctionEvent(FormattedTimesMixin):
         )

     @property
+    @deprecated(
+        "`self_cuda_memory_usage` is deprecated. Use `self_device_memory_usage` instead.",
+        category=FutureWarning,
+    )
     def self_cuda_memory_usage(self):  # To be deprecated
-        self.self_device_memory_usage
+        return self.self_device_memory_usage

     @property
     def cpu_time_total(self):

@@ -574,8 +583,12 @@ class FunctionEvent(FormattedTimesMixin):
         return self.time_range.elapsed_us()

     @property
+    @deprecated(
+        "`cuda_time_total` is deprecated. Use `device_time_total` instead.",
+        category=FutureWarning,
+    )
     def cuda_time_total(self):  # To be deprecated
-        self.device_time_total
+        return self.device_time_total

     @property
     def self_device_time_total(self):

@@ -590,8 +603,12 @@ class FunctionEvent(FormattedTimesMixin):
         return self.device_time_total

     @property
+    @deprecated(
+        "`self_cuda_time_total` is deprecated. Use `self_device_time_total` instead.",
+        category=FutureWarning,
+    )
     def self_cuda_time_total(self):  # To be deprecated
-        self.self_device_time_total
+        return self.self_device_time_total

     @property
     def key(self):
@@ -1,7 +1,7 @@
 import contextlib
-import warnings

 from typing import Union
+from typing_extensions import deprecated

 import torch

@@ -377,6 +377,15 @@ def enable_cudnn_sdp(enabled: bool):


 @contextlib.contextmanager
+@deprecated(
+    (
+        "`torch.backends.cuda.sdp_kernel()` is deprecated. "
+        "In the future, this context manager will be removed. "
+        "Please see `torch.nn.attention.sdpa_kernel()` for the new context manager, "
+        "with updated signature."
+    ),
+    category=FutureWarning,
+)
 def sdp_kernel(
     enable_flash: bool = True,
     enable_math: bool = True,

@@ -389,15 +398,6 @@ def sdp_kernel(
     This context manager can be used to temporarily enable or disable any of the three backends for scaled dot product attention.
     Upon exiting the context manager, the previous state of the flags will be restored.
     """
-    warnings.warn(
-        (
-            "torch.backends.cuda.sdp_kernel() "
-            "is deprecated. In the future, this context manager will be removed. "
-            "Please see, torch.nn.attention.sdpa_kernel() for the new context manager, with updated "
-            "signature."
-        ),
-        FutureWarning,
-    )
     from torch.nn.attention import sdpa_kernel

     backend_list = []
@@ -1,5 +1,5 @@
-import warnings
 from typing import Any
+from typing_extensions import deprecated

 import torch

@@ -12,6 +12,11 @@ class autocast(torch.amp.autocast_mode.autocast):
     ``torch.cpu.amp.autocast(args...)`` is deprecated. Please use ``torch.amp.autocast("cpu", args...)`` instead.
     """

+    @deprecated(
+        "`torch.cpu.amp.autocast(args...)` is deprecated. "
+        "Please use `torch.amp.autocast('cpu', args...)` instead.",
+        category=FutureWarning,
+    )
     def __init__(
         self,
         enabled: bool = True,

@@ -23,10 +28,6 @@ class autocast(torch.amp.autocast_mode.autocast):
             self.device = "cpu"
             self.fast_dtype = dtype
             return
-        warnings.warn(
-            "torch.cpu.amp.autocast(args...) is deprecated. Please use torch.amp.autocast('cpu', args...) instead.",
-            DeprecationWarning,
-        )
         super().__init__(
             "cpu", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
         )
@@ -1,4 +1,4 @@
-import warnings
+from typing_extensions import deprecated

 import torch

@@ -11,6 +11,11 @@ class GradScaler(torch.amp.GradScaler):
     ``torch.cpu.amp.GradScaler(args...)`` is deprecated. Please use ``torch.amp.GradScaler("cpu", args...)`` instead.
     """

+    @deprecated(
+        "`torch.cpu.amp.GradScaler(args...)` is deprecated. "
+        "Please use `torch.amp.GradScaler('cpu', args...)` instead.",
+        category=FutureWarning,
+    )
     def __init__(
         self,
         init_scale: float = 2.0**16,

@@ -19,9 +24,6 @@ class GradScaler(torch.amp.GradScaler):
         growth_interval: int = 2000,
         enabled: bool = True,
     ) -> None:
-        warnings.warn(
-            "torch.cpu.amp.GradScaler(args...) is deprecated. Please use torch.amp.GradScaler('cpu', args...) instead."
-        )
         super().__init__(
             "cpu",
             init_scale=init_scale,
@@ -145,8 +145,8 @@ def compare(before, after, format_flamegraph=format_flamegraph):
     before_segs = {_seg_key(seg) for seg in before}
     after_segs = {_seg_key(seg) for seg in after}

-    print(f'only_before = {[a for a,_ in (before_segs - after_segs)]}')
-    print(f'only_after = {[a for a,_ in (after_segs - before_segs)]}')
+    print(f'only_before = {[a for a, _ in (before_segs - after_segs)]}')
+    print(f'only_after = {[a for a, _ in (after_segs - before_segs)]}')

     for seg in before:
         if _seg_key(seg) not in after_segs:

@@ -382,7 +382,11 @@ add_local_files(local_files, $VIZ_KIND)

 def _format_viz(data, viz_kind, device):
     if device is not None:
-        warnings.warn('device argument is deprecated, plots now contain all device')
+        warnings.warn(
+            'device argument is deprecated, plots now contain all device',
+            FutureWarning,
+            stacklevel=3,
+        )
     buffer = pickle.dumps(data)
     buffer += b'\x00' * (3 - len(buffer) % 3)
     # Encode the buffer with base64
@@ -1,6 +1,6 @@
 import functools
-import warnings
 from typing import Any
+from typing_extensions import deprecated

 import torch

@@ -13,6 +13,11 @@ class autocast(torch.amp.autocast_mode.autocast):
     ``torch.cuda.amp.autocast(args...)`` is deprecated. Please use ``torch.amp.autocast("cuda", args...)`` instead.
     """

+    @deprecated(
+        "`torch.cuda.amp.autocast(args...)` is deprecated. "
+        "Please use `torch.amp.autocast('cuda', args...)` instead.",
+        category=FutureWarning,
+    )
     def __init__(
         self,
         enabled: bool = True,

@@ -24,10 +29,6 @@ class autocast(torch.amp.autocast_mode.autocast):
             self.device = "cuda"
             self.fast_dtype = dtype
             return
-        warnings.warn(
-            "torch.cuda.amp.autocast(args...) is deprecated. Please use torch.amp.autocast('cuda', args...) instead.",
-            DeprecationWarning,
-        )
         super().__init__(
             "cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
         )

@@ -50,29 +51,38 @@ class autocast(torch.amp.autocast_mode.autocast):


 # Preserved only for BC reasons
+@deprecated(
+    "`torch.cuda.amp.autocast_mode._cast(value, dtype)` is deprecated. "
+    "Please use `torch.amp.autocast_mode._cast(value, 'cuda', dtype)` instead.",
+    category=FutureWarning,
+)
 def _cast(value, dtype):
     return torch.amp.autocast_mode._cast(value, "cuda", dtype)


+@deprecated(
+    "`torch.cuda.amp.custom_fwd(args...)` is deprecated. "
+    "Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.",
+    category=FutureWarning,
+)
 def custom_fwd(fwd=None, *, cast_inputs=None):
     """
     ``torch.cuda.amp.custom_fwd(args...)`` is deprecated. Please use
     ``torch.amp.custom_fwd(args..., device_type='cuda')`` instead.
     """
-    warnings.warn(
-        "torch.cuda.amp.custom_fwd(args...) is deprecated. Please use torch.amp.custom_fwd(args..., device_type='cuda') instead."
-    )
     return functools.partial(torch.amp.custom_fwd, device_type="cuda")(
         fwd=fwd, cast_inputs=cast_inputs
     )


+@deprecated(
+    "`torch.cuda.amp.custom_bwd(args...)` is deprecated. "
+    "Please use `torch.amp.custom_bwd(args..., device_type='cuda')` instead.",
+    category=FutureWarning,
+)
 def custom_bwd(bwd):
     """
     ``torch.cuda.amp.custom_bwd(args...)`` is deprecated. Please use
     ``torch.amp.custom_bwd(args..., device_type='cuda')`` instead.
     """
-    warnings.warn(
-        "torch.cuda.amp.custom_bwd(args...) is deprecated. Please use torch.amp.custom_bwd(args..., device_type='cuda') instead."
-    )
     return functools.partial(torch.amp.custom_bwd, device_type="cuda")(bwd)
@@ -1,4 +1,4 @@
-import warnings
+from typing_extensions import deprecated

 import torch

@@ -11,6 +11,11 @@ class GradScaler(torch.amp.GradScaler):
     ``torch.cuda.amp.GradScaler(args...)`` is deprecated. Please use ``torch.amp.GradScaler("cuda", args...)`` instead.
     """

+    @deprecated(
+        "`torch.cuda.amp.GradScaler(args...)` is deprecated. "
+        "Please use `torch.amp.GradScaler('cuda', args...)` instead.",
+        category=FutureWarning,
+    )
     def __init__(
         self,
         init_scale: float = 2.0**16,

@@ -19,9 +24,6 @@ class GradScaler(torch.amp.GradScaler):
         growth_interval: int = 2000,
         enabled: bool = True,
     ) -> None:
-        warnings.warn(
-            "torch.cuda.amp.GradScaler(args...) is deprecated. Please use torch.amp.GradScaler('cuda', args...) instead."
-        )
         super().__init__(
             "cuda",
             init_scale=init_scale,
@@ -9,6 +9,7 @@ import warnings
 from inspect import signature

 from typing import Any, Dict, Optional, Tuple, Union
+from typing_extensions import deprecated

 import torch
 from torch import _C

@@ -446,21 +447,21 @@ def max_memory_reserved(device: Union[Device, int] = None) -> int:
     return memory_stats(device=device).get("reserved_bytes.all.peak", 0)


+@deprecated(
+    "`torch.cuda.memory_cached` has been renamed to `torch.cuda.memory_reserved`",
+    category=FutureWarning,
+)
 def memory_cached(device: Union[Device, int] = None) -> int:
     r"""Deprecated; see :func:`~torch.cuda.memory_reserved`."""
-    warnings.warn(
-        "torch.cuda.memory_cached has been renamed to torch.cuda.memory_reserved",
-        FutureWarning,
-    )
     return memory_reserved(device=device)


+@deprecated(
+    "`torch.cuda.max_memory_cached` has been renamed to `torch.cuda.max_memory_reserved`",
+    category=FutureWarning,
+)
 def max_memory_cached(device: Union[Device, int] = None) -> int:
     r"""Deprecated; see :func:`~torch.cuda.max_memory_reserved`."""
-    warnings.warn(
-        "torch.cuda.max_memory_cached has been renamed to torch.cuda.max_memory_reserved",
-        FutureWarning,
-    )
     return max_memory_reserved(device=device)

@@ -89,8 +89,10 @@ def reduce(
         )
     else:
         warnings.warn(
-            "nccl.reduce with an output tensor list is deprecated. "
-            "Please specify a single output tensor with argument 'output' instead instead."
+            "`nccl.reduce` with an output tensor list is deprecated. "
+            "Please specify a single output tensor with argument 'output' instead instead.",
+            FutureWarning,
+            stacklevel=2,
         )
         _output = outputs[root]
 elif not isinstance(output, torch.Tensor) and isinstance(

@@ -99,7 +101,9 @@ def reduce(
         # User called old API with positional arguments of list of output tensors.
         warnings.warn(
             "nccl.reduce with an output tensor list is deprecated. "
-            "Please specify a single output tensor."
+            "Please specify a single output tensor.",
+            FutureWarning,
+            stacklevel=2,
         )
         _output = output[root]
     else:
@@ -1,5 +1,5 @@
-import warnings
 from typing import Callable, Iterable, Optional, Union
+from typing_extensions import deprecated

 import torch
 import torch.distributed as dist

@@ -38,6 +38,13 @@ from torch.distributed.fsdp.wrap import _Policy


 @contract(state_cls=_FSDPState)
+@deprecated(
+    "`torch.distributed._composable.fully_shard` is being deprecated. "
+    "You can continue to use the wrapper based FSDP. "
+    "See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py. "
+    "`torch.distributed._composable.fully_shard` will be removed after PyTorch 2.5.",
+    category=FutureWarning,
+)
 def fully_shard(
     module: nn.Module,
     *,

@@ -55,16 +62,7 @@ def fully_shard(
         Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]
     ] = None,
 ) -> nn.Module:
-    """
-    Applies ``FullyShardedDataParallel` (FSDP) semantics to ``module``.
-    """
-    warnings.warn(
-        "``torch.distributed._composable.fully_shard`` is being deprecated."
-        "You can contintue to use the wrapper based FSDP."
-        "See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py."
-        "``torch.distributed._composable.fully_shard`` will be removed after PyTorch 2.5."
-    )
-
+    """Applies ``FullyShardedDataParallel`` (FSDP) semantics to ``module``."""
     torch._C._log_api_usage_once("torch.distributed.fully_shard")
     # Enforce the new auto wrap policy
     if policy is not None and not isinstance(policy, _Policy):
@@ -766,7 +766,9 @@ def _resolve_group_name(group: RANK_TYPES, tag: str = "") -> str:
         warnings.warn(
             "The combination of ranks + tag as process group "
             "identifier has been deprecated. Please switch to "
-            "using ProcessGroup, DeviceMesh, or group name instead."
+            "using ProcessGroup, DeviceMesh, or group name instead.",
+            FutureWarning,
+            stacklevel=3,
         )
         return c10d._resolve_group_name_by_ranks_and_tag(cast(List[int], group), tag)
     else:
@@ -5,8 +5,15 @@ import torch
 import warnings

 from torch.distributed.checkpoint import *  # noqa: F403
-warnings.warn(
-    "torch.distributed._shard.checkpoint will be deprecated, use torch.distributed.checkpoint instead",
-    DeprecationWarning
-)
+
+
+with warnings.catch_warnings():
+    warnings.simplefilter("always")
+    warnings.warn(
+        "`torch.distributed._shard.checkpoint` will be deprecated, "
+        "use `torch.distributed.checkpoint` instead",
+        DeprecationWarning,
+        stacklevel=2,
+    )

 sys.modules['torch.distributed._shard.checkpoint'] = torch.distributed.checkpoint
@ -10,6 +10,7 @@ from typing import (
|
|||
cast,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
from typing_extensions import deprecated
|
||||
import copy
|
||||
import warnings
|
||||
from functools import reduce
|
||||
|
|
@ -396,7 +397,11 @@ class ShardedTensor(ShardedTensorBase):
|
|||
return reduce(operator.mul, shard_md.shard_sizes) # type: ignore[attr-defined]
|
||||
|
||||
if enforce_dtype:
|
||||
warnings.warn("enforce_dtype is deprecated. Please use dtype instead.")
|
||||
warnings.warn(
|
||||
"`enforce_dtype` is deprecated. Please use `dtype` instead.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
rank = dist.get_rank(self._process_group)
|
||||
full_size = self.metadata().size
|
||||
|
|
@ -737,6 +742,7 @@ class ShardedTensor(ShardedTensorBase):
|
|||
return sharded_tensor
|
||||
|
||||
@classmethod
|
||||
@deprecated(DEPRECATE_MSG, category=FutureWarning)
|
||||
def _init_from_local_tensor(
|
||||
cls,
|
||||
local_tensor: torch.Tensor,
|
||||
|
|
@ -801,8 +807,6 @@ class ShardedTensor(ShardedTensorBase):
|
|||
We fully rely on the user to ensure local tensor is sharded based on the
|
||||
sharding spec.
|
||||
"""
|
||||
warnings.warn(DEPRECATE_MSG)
|
||||
|
||||
if not local_tensor.is_contiguous():
|
||||
raise ValueError('local_tensor is not a contiguous Tensor.')
|
||||
|
||||
|
|
@ -980,6 +984,7 @@ class ShardedTensor(ShardedTensorBase):
|
|||
"""
|
||||
return self._sharding_spec
|
||||
|
||||
@deprecated(DEPRECATE_MSG, category=FutureWarning)
|
||||
def reshard(self, resharding_spec: shard_spec.ShardingSpec) -> ShardedTensor:
|
||||
"""
|
||||
Reshard a sharded tensor given the ``resharding_spec``. For now, we only support
|
||||
|
|
@ -1050,8 +1055,6 @@ class ShardedTensor(ShardedTensorBase):
|
|||
tensor([[3], [3], [5], [5], [7], [7], [9], [9]]) # Rank 2
|
||||
tensor([[4], [4], [6], [6], [8], [8], [10], [10]]) # Rank 3
|
||||
"""
|
||||
warnings.warn(DEPRECATE_MSG)
|
||||
|
||||
if (
|
||||
not isinstance(resharding_spec, shard_spec.ChunkShardingSpec) or
|
||||
not isinstance(self._sharding_spec, shard_spec.ChunkShardingSpec)
|
||||
|
|
@ -1096,6 +1099,7 @@ class ShardedTensor(ShardedTensorBase):
|
|||
return self.local_shards()[0].tensor
|
||||
|
||||
@classmethod
|
||||
@deprecated(DEPRECATE_MSG, category=FutureWarning)
|
||||
def __torch_function__(cls, func, types, args=(), kwargs=None):
|
||||
def dispatch(st: ShardedTensor, func: Callable):
|
||||
# Dispatch to custom user provided op first if it exists.
|
||||
|
|
@ -1120,7 +1124,6 @@ class ShardedTensor(ShardedTensorBase):
|
|||
f"torch function '{func.__name__}', with args: {args} and "
|
||||
f"kwargs: {kwargs} not supported for ShardedTensor!")
|
||||
|
||||
warnings.warn(DEPRECATE_MSG)
|
||||
# Find ShardedTensor instance to get process_group and sharding_spec.
|
||||
st_instance = None
|
||||
|
||||
|
|
|
|||
|
|
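The ShardedTensor hunks stack the decorator under `@classmethod`: `deprecated` wraps the function first, then `classmethod` wraps the result. A toy version of that ordering, assuming typing_extensions >= 4.5; `LegacyThing` and its methods are invented:

    import warnings
    from typing_extensions import deprecated

    _DEPRECATE_MSG = "`LegacyThing` methods are deprecated; use the replacement API."


    class LegacyThing:
        @classmethod
        @deprecated(_DEPRECATE_MSG, category=FutureWarning)  # applied first, then classmethod
        def from_value(cls, value):
            obj = cls()
            obj.value = value
            return obj

        @deprecated(_DEPRECATE_MSG, category=FutureWarning)
        def reshard(self):
            # Instance methods take the decorator directly.
            return self


    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        LegacyThing.from_value(1).reshard()

    assert len(caught) == 2 and all(w.category is FutureWarning for w in caught)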
@ -5,8 +5,14 @@ import torch
|
|||
import warnings
|
||||
|
||||
from torch.distributed._shard.sharded_tensor import * # noqa: F403
|
||||
warnings.warn(
|
||||
"torch.distributed._sharded_tensor will be deprecated, use torch.distributed._shard.sharded_tensor instead",
|
||||
DeprecationWarning
|
||||
)
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("always")
|
||||
warnings.warn(
|
||||
"`torch.distributed._sharded_tensor` will be deprecated, "
|
||||
"use `torch.distributed._shard.sharded_tensor` instead",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
sys.modules['torch.distributed._sharded_tensor'] = torch.distributed._shard.sharded_tensor
|
||||
|
|
|
|||
|
|
@ -5,10 +5,15 @@ import torch
|
|||
import warnings
|
||||
|
||||
from torch.distributed._shard.sharding_spec import * # noqa: F403
|
||||
warnings.warn(
|
||||
"torch.distributed._sharding_spec will be deprecated, use torch.distributed._shard.sharding_spec instead",
|
||||
DeprecationWarning
|
||||
)
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("always")
|
||||
warnings.warn(
|
||||
"`torch.distributed._sharding_spec` will be deprecated, "
|
||||
"use `torch.distributed._shard.sharding_spec` instead",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
import torch.distributed._shard.sharding_spec as _sharding_spec
|
||||
sys.modules['torch.distributed._sharding_spec'] = _sharding_spec
|
||||
|
|
|
|||
|
|
@ -746,6 +746,8 @@ def distribute_module(
|
|||
warnings.warn(
|
||||
"Deprecating input_fn that takes two arguments (inputs, device_mesh), "
|
||||
"please use input_fn that takes in (module, inputs, device_mesh) instead!",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
module.register_forward_pre_hook(lambda _, inputs: input_fn(inputs, device_mesh)) # type: ignore[call-arg]
|
||||
elif num_args == 3:
|
||||
|
|
@ -765,6 +767,8 @@ def distribute_module(
|
|||
warnings.warn(
|
||||
"Deprecating output_fn that takes two arguments (inputs, device_mesh), "
|
||||
"please use output_fn that takes in (module, inputs, device_mesh) instead!",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
module.register_forward_hook(
|
||||
lambda mod, inputs, outputs: output_fn(outputs, device_mesh) # type: ignore[call-arg]
|
||||
|
|
|
|||
|
|
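The `distribute_module` hunks keep accepting the old two-argument callbacks but warn with `FutureWarning` before adapting them to the new three-argument form. A rough sketch of that arity check with hypothetical names (`register_input_fn`, `legacy_input_fn`); it is not the DTensor implementation:

    import inspect
    import warnings

    _registered_hooks = []


    def register_input_fn(module, input_fn):
        num_args = len(inspect.signature(input_fn).parameters)
        if num_args == 2:
            warnings.warn(
                "Deprecating input_fn that takes two arguments (inputs, ctx), "
                "please use input_fn that takes in (module, inputs, ctx) instead!",
                FutureWarning,
                stacklevel=2,
            )

            def adapted(mod, inputs, ctx):
                # Adapt the legacy (inputs, ctx) signature to the new one.
                return input_fn(inputs, ctx)

            _registered_hooks.append(adapted)
        elif num_args == 3:
            _registered_hooks.append(input_fn)
        else:
            raise ValueError(f"input_fn should take 2 or 3 arguments, got {num_args}")


    def legacy_input_fn(inputs, ctx):  # old-style callback: triggers the warning
        return inputs


    register_input_fn(object(), legacy_input_fn)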
@ -233,7 +233,8 @@ def checkpoint_wrapper(
|
|||
f"Please specify {CheckpointImpl.NO_REENTRANT} as "
|
||||
f"{CheckpointImpl.REENTRANT} will soon be removed as "
|
||||
"the default and eventually deprecated.",
|
||||
stacklevel=1,
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return CheckpointWrapper(
|
||||
module,
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
import os
|
||||
import warnings
|
||||
from typing import Any, cast, Dict, Optional, Set, Union
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
|
|
@ -17,6 +18,11 @@ from .utils import _all_gather_keys, _api_bc_check, _DistWrapper, _profile
|
|||
__all__ = ["load_state_dict", "load"]
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`load_state_dict` is deprecated and will be removed in future versions. "
|
||||
"Please use `load` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def load_state_dict(
|
||||
state_dict: Dict[str, Any],
|
||||
storage_reader: StorageReader,
|
||||
|
|
@ -26,10 +32,6 @@ def load_state_dict(
|
|||
planner: Optional[LoadPlanner] = None,
|
||||
) -> None:
|
||||
"""This method is deprecated. Please switch to 'load'."""
|
||||
warnings.warn(
|
||||
"'load_state_dict' is deprecated and will be removed in future versions. "
|
||||
"Please use 'load' instead."
|
||||
)
|
||||
storage_reader.reset()
|
||||
with _profile():
|
||||
# TODO: test returning `load` here instead.
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ import os
|
|||
import warnings
|
||||
from concurrent.futures import Future, ThreadPoolExecutor
|
||||
from typing import cast, Optional, Union
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
|
|
@ -24,6 +25,11 @@ from .utils import _api_bc_check, _DistWrapper, _profile
|
|||
__all__ = ["save_state_dict", "save", "async_save"]
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`save_state_dict` is deprecated and will be removed in future versions."
|
||||
"Please use `save` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def save_state_dict(
|
||||
state_dict: STATE_DICT_TYPE,
|
||||
storage_writer: StorageWriter,
|
||||
|
|
@ -33,11 +39,6 @@ def save_state_dict(
|
|||
planner: Optional[SavePlanner] = None,
|
||||
) -> Metadata:
|
||||
"""This method is deprecated. Please switch to 'save'."""
|
||||
warnings.warn(
|
||||
"'save_state_dict' is deprecated and will be removed in future versions."
|
||||
"Please use 'save' instead."
|
||||
)
|
||||
|
||||
storage_writer.reset()
|
||||
|
||||
# TODO: test returning `save` here instead.
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ import warnings
|
|||
from collections import namedtuple
|
||||
from datetime import timedelta
|
||||
from typing import Any, Callable, Dict, Optional, Tuple, Union, List, TYPE_CHECKING
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
from torch._C._distributed_c10d import (
|
||||
|
|
@ -364,11 +365,12 @@ class _reduce_op:
|
|||
setattr(self, k, v)
|
||||
self.__members__ = ReduceOp.RedOpType.__members__
|
||||
|
||||
@deprecated(
|
||||
"`torch.distributed.reduce_op` is deprecated, "
|
||||
"please use `torch.distributed.ReduceOp` instead",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def __getattribute__(self, key):
|
||||
warnings.warn(
|
||||
"torch.distributed.reduce_op is deprecated, please use "
|
||||
"torch.distributed.ReduceOp instead"
|
||||
)
|
||||
return object.__getattribute__(self, key)
|
||||
|
||||
|
||||
|
|
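The `_reduce_op` hunk moves the warning from the body of `__getattribute__` into the decorator, so any attribute access on the legacy alias object warns once per call. A toy version of that trick; `legacy_ops` and `NewOps` are invented, and the delegation below is only a stand-in for the real attribute copying:

    import warnings
    from typing_extensions import deprecated


    class NewOps:
        SUM = "sum"
        MAX = "max"


    class _LegacyOps:
        """Backward-compatible alias object; every attribute access warns."""

        @deprecated(
            "`legacy_ops` is deprecated, please use `NewOps` instead",
            category=FutureWarning,
        )
        def __getattribute__(self, key):
            return getattr(NewOps, key)


    legacy_ops = _LegacyOps()

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert legacy_ops.SUM == "sum"

    assert caught and caught[0].category is FutureWarning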
@ -675,7 +677,9 @@ def _get_pg_default_device(group: Optional[ProcessGroup] = None) -> torch.device
|
|||
warnings.warn(
|
||||
f"You are using a Backend {type(group)} as a ProcessGroup. "
|
||||
"This usage is deprecated since PyTorch 2.0. Please use a public API "
|
||||
"of PyTorch Distributed instead."
|
||||
"of PyTorch Distributed instead.",
|
||||
FutureWarning,
|
||||
stacklevel=3,
|
||||
)
|
||||
# Most users create Gloo with private API for object collectives
|
||||
_world.pg_default_device[group] = torch.device("cpu")
|
||||
|
|
@ -829,13 +833,15 @@ def get_global_rank(group: ProcessGroup, group_rank: int) -> int:
|
|||
return rank
|
||||
raise ValueError(f"Group rank {group_rank} is not part of group {group}")
|
||||
|
||||
|
||||
# TODO: remove this once the ecosystem moves away from it.
|
||||
@deprecated(
|
||||
"`torch.distributed.distributed_c10d._get_global_rank` is deprecated, "
|
||||
"please use `torch.distributed.distributed_c10d.get_global_rank` instead",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def _get_global_rank(group, rank) -> int:
|
||||
"""Use get_global_rank as this method is deprecated."""
|
||||
warnings.warn(
|
||||
"torch.distributed.distributed_c10d._get_global_rank is deprecated "
|
||||
"please use torch.distributed.distributed_c10d.get_global_rank instead"
|
||||
)
|
||||
return get_global_rank(group, rank)
|
||||
|
||||
|
||||
|
|
@ -2286,6 +2292,12 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=None, async_op=False):
|
|||
work.wait()
|
||||
|
||||
@_exception_logger
|
||||
@deprecated(
|
||||
"`torch.distributed.all_reduce_coalesced` will be deprecated. If you must "
|
||||
"use it, please revisit our documentation later at "
|
||||
"https://pytorch.org/docs/main/distributed.html#collective-functions",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def all_reduce_coalesced(tensors, op=ReduceOp.SUM, group=None, async_op=False):
|
||||
"""
|
||||
WARNING: at this time individual shape checking is not implemented across nodes.
|
||||
|
|
@ -2320,11 +2332,6 @@ def all_reduce_coalesced(tensors, op=ReduceOp.SUM, group=None, async_op=False):
|
|||
None, if not async_op or if not part of the group.
|
||||
|
||||
"""
|
||||
warnings.warn(
|
||||
"torch.distributed.all_reduce_coalesced will be deprecated. If you must "
|
||||
"use it, please revisit our documentation later at "
|
||||
"https://pytorch.org/docs/main/distributed.html#collective-functions"
|
||||
)
|
||||
if isinstance(tensors, torch.Tensor):
|
||||
tensors = [tensors]
|
||||
_check_tensor_list(tensors, "tensor")
|
||||
|
|
@ -3198,6 +3205,11 @@ def all_gather_into_tensor(output_tensor, input_tensor, group=None, async_op=Fal
|
|||
|
||||
|
||||
@_exception_logger
|
||||
@deprecated(
|
||||
"`torch.distributed._all_gather_base` is a private function and will be deprecated. "
|
||||
"Please use `torch.distributed.all_gather_into_tensor` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def _all_gather_base(output_tensor, input_tensor, group=None, async_op=False):
|
||||
"""
|
||||
Single tensor all gather. Gathers a single tensor from all ranks, and puts them in a single output tensor.
|
||||
|
|
@ -3219,15 +3231,16 @@ def _all_gather_base(output_tensor, input_tensor, group=None, async_op=False):
|
|||
`all_gather_into_tensor` instead.
|
||||
|
||||
"""
|
||||
warnings.warn(
|
||||
"torch.distributed._all_gather_base is a private function and will be "
|
||||
"deprecated. Please use torch.distributed.all_gather_into_tensor "
|
||||
"instead."
|
||||
)
|
||||
return all_gather_into_tensor(output_tensor, input_tensor, group, async_op)
|
||||
|
||||
|
||||
@_exception_logger
|
||||
@deprecated(
|
||||
"`torch.distributed.all_gather_coalesced` will be deprecated. If you must use it, "
|
||||
"please revisit our documentation later at "
|
||||
"https://pytorch.org/docs/main/distributed.html#collective-functions",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def all_gather_coalesced(
|
||||
output_tensor_lists, input_tensor_list, group=None, async_op=False
|
||||
):
|
||||
|
|
@ -3274,11 +3287,6 @@ def all_gather_coalesced(
|
|||
performance improvements but users of this function should take extra care
|
||||
to ensure that each node passes in tensors whose shapes match across nodes.
|
||||
"""
|
||||
warnings.warn(
|
||||
"torch.distributed.all_gather_coalesced will be deprecated. If you must "
|
||||
"use it, please revisit our documentation later at "
|
||||
"https://pytorch.org/docs/main/distributed.html#collective-functions"
|
||||
)
|
||||
# We only check basic compatibility with C++ params here, C++ code will
|
||||
# do shape and type checking.
|
||||
if _rank_not_in_group(group):
|
||||
|
|
@ -3608,6 +3616,11 @@ def reduce_scatter_tensor(output, input, op=ReduceOp.SUM, group=None, async_op=F
|
|||
work.wait()
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.distributed._reduce_scatter_base` is a private function and will be deprecated. "
|
||||
"Please use `torch.distributed.reduce_scatter_tensor` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def _reduce_scatter_base(output, input, op=ReduceOp.SUM, group=None, async_op=False):
|
||||
"""
|
||||
Reduces, then scatters a flattened tensor to all processes in a group.
|
||||
|
|
@ -3628,11 +3641,6 @@ def _reduce_scatter_base(output, input, op=ReduceOp.SUM, group=None, async_op=Fa
|
|||
`reduce_scatter_tensor` instead.
|
||||
|
||||
"""
|
||||
warnings.warn(
|
||||
"torch.distributed._reduce_scatter_base is a private function and will "
|
||||
"be deprecated. Please use torch.distributed.reduce_scatter_tensor "
|
||||
"instead."
|
||||
)
|
||||
return reduce_scatter_tensor(output, input, op, group, async_op)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
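One reason these hunks consistently pick `FutureWarning` over `DeprecationWarning` is the stock warning filter: `DeprecationWarning` is hidden unless it is triggered directly from `__main__`, while `FutureWarning` is shown by default. A short stdlib-only demo (not PyTorch-specific) that emulates the library-code case with an explicit filter so it does not depend on how the interpreter was started:

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.resetwarnings()
        # Stock CPython ignores DeprecationWarning raised from library code;
        # reproduce that with an explicit "ignore" filter.
        warnings.simplefilter("ignore", DeprecationWarning)

        warnings.warn("old collective is deprecated", DeprecationWarning, stacklevel=2)
        warnings.warn("old collective is deprecated", FutureWarning, stacklevel=2)

    # Only the FutureWarning survives the default-style filtering.
    assert [w.category for w in caught] == [FutureWarning]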
@ -8,10 +8,10 @@
|
|||
|
||||
import abc
|
||||
import time
|
||||
import warnings
|
||||
from collections import namedtuple
|
||||
from functools import wraps
|
||||
from typing import Dict, Optional
|
||||
from typing_extensions import deprecated
|
||||
|
||||
__all__ = ['MetricsConfig', 'MetricHandler', 'ConsoleMetricHandler', 'NullMetricHandler', 'MetricStream',
|
||||
'configure', 'getStream', 'prof', 'profile', 'put_metric', 'publish_metric', 'get_elapsed_time_ms',
|
||||
|
|
@ -137,6 +137,7 @@ def prof(fn=None, group: str = "torchelastic"):
|
|||
return wrap
|
||||
|
||||
|
||||
@deprecated("Deprecated, use `@prof` instead", category=FutureWarning)
|
||||
def profile(group=None):
|
||||
"""
|
||||
@profile decorator adds latency and success/failure metrics to any given function.
|
||||
|
|
@ -148,8 +149,6 @@ def profile(group=None):
|
|||
@metrics.profile("my_metric_group")
|
||||
def some_function(<arguments>):
|
||||
"""
|
||||
warnings.warn("Deprecated, use @prof instead", DeprecationWarning)
|
||||
|
||||
def wrap(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
|
|
@ -187,10 +186,11 @@ def put_metric(metric_name: str, metric_value: int, metric_group: str = "torchel
|
|||
getStream(metric_group).add_value(metric_name, metric_value)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"Deprecated, use `put_metric(metric_group)(metric_name, metric_value)` instead",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def publish_metric(metric_group: str, metric_name: str, metric_value: int):
|
||||
warnings.warn(
|
||||
"Deprecated, use put_metric(metric_group)(metric_name, metric_value) instead"
|
||||
)
|
||||
metric_stream = getStream(metric_group)
|
||||
metric_stream.add_value(metric_name, metric_value)
|
||||
|
||||
|
|
|
|||
|
|
@ -446,7 +446,8 @@ def _init_core_state(
|
|||
elif sharding_strategy == ShardingStrategy.NO_SHARD:
|
||||
warnings.warn(
|
||||
"The `NO_SHARD` sharding strategy is deprecated. If having issues, "
|
||||
"please use DistributedDataParallel instead.",
|
||||
"please use `DistributedDataParallel` instead.",
|
||||
FutureWarning,
|
||||
# Level 1 is here, level 2 is from `FullyShardedDataParallel`, and
|
||||
# level 3 is from the true caller
|
||||
stacklevel=3,
|
||||
|
|
|
|||
|
|
@ -1198,11 +1198,13 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
return total_norm.to(total_norm_dtype)
|
||||
|
||||
@staticmethod
|
||||
def _warn_optim_input(optim_input):
|
||||
def _warn_optim_input(optim_input, *, stacklevel: int = 1):
|
||||
if optim_input is not None:
|
||||
warnings.warn(
|
||||
"The `optim_input` argument is deprecated and will be removed after PyTorch 1.13. You may remove it "
|
||||
"from your code without changing its functionality."
|
||||
"The `optim_input` argument is deprecated and will be removed after PyTorch 1.13. "
|
||||
"You may remove it from your code without changing its functionality.",
|
||||
FutureWarning,
|
||||
stacklevel=stacklevel + 1,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -1217,11 +1219,13 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
return False
|
||||
|
||||
@staticmethod
|
||||
def _warn_legacy_optim_state_dict(curr: str, new: str):
|
||||
def _warn_legacy_optim_state_dict(curr: str, new: str, *, stacklevel: int = 1):
|
||||
warnings.warn(
|
||||
f"``FullyShardedDataParallel.{curr}``is being deprecated and is "
|
||||
f"replaced by ``FullyShardedDataParallel.{new}``. "
|
||||
f"``FullyShardedDataParallel.{curr}`` may be removed after PyTorch 2.2."
|
||||
f"``FullyShardedDataParallel.{curr}`` may be removed after PyTorch 2.2.",
|
||||
FutureWarning,
|
||||
stacklevel=stacklevel + 1,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -1239,6 +1243,8 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
full_state_dict: bool = True,
|
||||
group: Optional[dist.ProcessGroup] = None,
|
||||
cpu_offload: bool = True,
|
||||
*,
|
||||
_stacklevel: int = 1,
|
||||
) -> Dict[str, Any]:
|
||||
"""Transform the state-dict of an optimizer corresponding to a sharded model.
|
||||
|
||||
|
|
@ -1247,7 +1253,9 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
FSDP internal information and internal sharding from the optim_state_dict.
|
||||
"""
|
||||
if full_state_dict:
|
||||
FullyShardedDataParallel._warn_optim_input(optim_input)
|
||||
FullyShardedDataParallel._warn_optim_input(
|
||||
optim_input, stacklevel=_stacklevel + 1
|
||||
)
|
||||
using_optim_input = FullyShardedDataParallel._is_using_optim_input(
|
||||
optim_input,
|
||||
optim,
|
||||
|
|
@ -1398,7 +1406,9 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
then nonzero ranks return an empty :class:`dict`.
|
||||
"""
|
||||
FullyShardedDataParallel._warn_legacy_optim_state_dict(
|
||||
"full_optim_state_dict", "optim_state_dict"
|
||||
"full_optim_state_dict",
|
||||
"optim_state_dict",
|
||||
stacklevel=2,
|
||||
)
|
||||
return FullyShardedDataParallel._optim_state_dict_impl(
|
||||
model=model,
|
||||
|
|
@ -1408,6 +1418,7 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
rank0_only=rank0_only,
|
||||
group=group,
|
||||
full_state_dict=True,
|
||||
_stacklevel=2,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -1429,7 +1440,9 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
cannot be directly used by the regular ``optim.load_state_dict``.
|
||||
"""
|
||||
FullyShardedDataParallel._warn_legacy_optim_state_dict(
|
||||
"sharded_optim_state_dict", "optim_state_dict"
|
||||
"sharded_optim_state_dict",
|
||||
"optim_state_dict",
|
||||
stacklevel=2,
|
||||
)
|
||||
return FullyShardedDataParallel._optim_state_dict_impl(
|
||||
model=model,
|
||||
|
|
@ -1439,6 +1452,7 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
rank0_only=False,
|
||||
full_state_dict=False,
|
||||
group=group,
|
||||
_stacklevel=2,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -1507,7 +1521,9 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
restricted to only include this rank's part of the optimizer state.
|
||||
"""
|
||||
FullyShardedDataParallel._warn_legacy_optim_state_dict(
|
||||
"shard_full_optim_state_dict", "optim_state_dict_to_load"
|
||||
"shard_full_optim_state_dict",
|
||||
"optim_state_dict_to_load",
|
||||
stacklevel=2,
|
||||
)
|
||||
return FullyShardedDataParallel._optim_state_dict_to_load_impl(
|
||||
optim_state_dict=full_optim_state_dict,
|
||||
|
|
@ -1544,7 +1560,9 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
Refer to :meth:`shard_full_optim_state_dict`.
|
||||
"""
|
||||
FullyShardedDataParallel._warn_legacy_optim_state_dict(
|
||||
"flatten_sharded_optim_state_dict", "optim_state_dict_to_load"
|
||||
"flatten_sharded_optim_state_dict",
|
||||
"optim_state_dict_to_load",
|
||||
stacklevel=2,
|
||||
)
|
||||
return FullyShardedDataParallel._optim_state_dict_to_load_impl(
|
||||
optim_state_dict=sharded_optim_state_dict,
|
||||
|
|
@ -1624,7 +1642,9 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
restricted to only include this rank's part of the optimizer state.
|
||||
"""
|
||||
FullyShardedDataParallel._warn_legacy_optim_state_dict(
|
||||
"scatter_full_optim_state_dict", "optim_state_dict_to_load"
|
||||
"scatter_full_optim_state_dict",
|
||||
"optim_state_dict_to_load",
|
||||
stacklevel=2,
|
||||
)
|
||||
return FullyShardedDataParallel._optim_state_dict_to_load_impl(
|
||||
optim_state_dict=full_optim_state_dict,
|
||||
|
|
@ -1855,6 +1875,7 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
cpu_offload=getattr(
|
||||
state_dict_settings.optim_state_dict_config, "offload_to_cpu", True
|
||||
),
|
||||
_stacklevel=2,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
|
|
|
|||
|
|
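The FSDP hunks thread a `stacklevel` keyword through `_warn_optim_input` and the related helpers so the emitted `FutureWarning` points at user code rather than at internal frames. A condensed sketch of that plumbing with invented names:

    import warnings


    def _warn_legacy_arg(*, stacklevel: int = 1) -> None:
        # +1 accounts for this helper's own frame.
        warnings.warn(
            "The `legacy_arg` argument is deprecated and will be removed; "
            "you may drop it without changing behavior.",
            FutureWarning,
            stacklevel=stacklevel + 1,
        )


    def public_api(data, legacy_arg=None, *, _stacklevel: int = 1):
        if legacy_arg is not None:
            # +1 again for public_api's frame, so the warning lands on the caller.
            _warn_legacy_arg(stacklevel=_stacklevel + 1)
        return data


    def wrapper_api(data, legacy_arg=None):
        # Deeper entry points bump _stacklevel once more per wrapping frame.
        return public_api(data, legacy_arg, _stacklevel=2)


    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        wrapper_api([1, 2, 3], legacy_arg=object())

    # The reported location is this call site, not a frame inside the helpers.
    print(caught[0].filename, caught[0].lineno)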
@ -159,7 +159,7 @@ will not pass ``--local-rank`` when you specify this flag.
|
|||
|
||||
"""
|
||||
|
||||
import warnings
|
||||
from typing_extensions import deprecated as _deprecated
|
||||
|
||||
from torch.distributed.run import get_args_parser, run
|
||||
|
||||
|
|
@ -188,17 +188,17 @@ def launch(args):
|
|||
run(args)
|
||||
|
||||
|
||||
@_deprecated(
|
||||
"The module torch.distributed.launch is deprecated\n"
|
||||
"and will be removed in future. Use torchrun.\n"
|
||||
"Note that --use-env is set by default in torchrun.\n"
|
||||
"If your script expects `--local-rank` argument to be set, please\n"
|
||||
"change it to read from `os.environ['LOCAL_RANK']` instead. See \n"
|
||||
"https://pytorch.org/docs/stable/distributed.html#launch-utility for \n"
|
||||
"further instructions\n",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def main(args=None):
|
||||
warnings.warn(
|
||||
"The module torch.distributed.launch is deprecated\n"
|
||||
"and will be removed in future. Use torchrun.\n"
|
||||
"Note that --use-env is set by default in torchrun.\n"
|
||||
"If your script expects `--local-rank` argument to be set, please\n"
|
||||
"change it to read from `os.environ['LOCAL_RANK']` instead. See \n"
|
||||
"https://pytorch.org/docs/stable/distributed.html#launch-utility for \n"
|
||||
"further instructions\n",
|
||||
FutureWarning,
|
||||
)
|
||||
args = parse_args(args)
|
||||
launch(args)
|
||||
|
||||
|
|
|
|||
|
|
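The launcher hunk imports the decorator under an alias (`deprecated as _deprecated`) so it does not clash with other module-level names, and applies it to the module's entry point. A small sketch of that shape; `mytool.launch` and `main` are illustrative, not the real launcher:

    import warnings
    # Alias the decorator so it cannot shadow or clash with public names.
    from typing_extensions import deprecated as _deprecated


    @_deprecated(
        "The module `mytool.launch` is deprecated and will be removed in future. "
        "Use `mytool run` instead.",
        category=FutureWarning,
    )
    def main(argv=None):
        print("launching with", argv or [])


    if __name__ == "__main__":
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            main(["--nproc", "2"])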
@ -5,6 +5,8 @@ optimizer locally on the workers where the parameters live. The distributed
|
|||
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
|
||||
apply the gradients on each worker.
|
||||
"""
|
||||
import warnings
|
||||
|
||||
import torch
|
||||
from torch import optim
|
||||
|
||||
|
|
@ -24,9 +26,15 @@ from .functional_sgd import _FunctionalSGD
|
|||
from .named_optimizer import _NamedOptimizer
|
||||
from .utils import as_functional_optim
|
||||
|
||||
from warnings import warn
|
||||
warn("TorchScript support for functional optimizers is"
|
||||
"deprecated and will be removed in a future PyTorch release. Consider using the torch.compile optimizer instead.")
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("always")
|
||||
warnings.warn(
|
||||
"`TorchScript` support for functional optimizers is deprecated "
|
||||
"and will be removed in a future PyTorch release. "
|
||||
"Consider using the `torch.compile` optimizer instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
# DistributedOptimizer imports torch.distributed.rpc names, so gate availability
|
||||
# based on RPC being available.
|
||||
|
|
|
|||
|
|
@ -1,7 +1,13 @@
|
|||
import warnings
|
||||
warnings.warn(
|
||||
"torch.distributed.pipeline is deprecated. For up-to-date pipeline parallel "
|
||||
"implementation, please refer to the PiPPy library under the PyTorch "
|
||||
"organization (Pipeline Parallelism for PyTorch): "
|
||||
"https://github.com/pytorch/PiPPy"
|
||||
)
|
||||
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("always")
|
||||
warnings.warn(
|
||||
"`torch.distributed.pipeline` is deprecated. For up-to-date pipeline parallel "
|
||||
"implementation, please refer to the PiPPy library under the PyTorch "
|
||||
"organization (Pipeline Parallelism for PyTorch): "
|
||||
"https://github.com/pytorch/PiPPy",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -22,7 +22,11 @@ def _deprecate_warnings(func_name: str, extra_msg: str) -> None:
|
|||
"""
|
||||
# TODO: Will follow up with dynamo POC to make warnings.warn working with dynamo.
|
||||
if not is_torchdynamo_compiling():
|
||||
warnings.warn(f"{func_name} is deprecated and will be removed soon. {extra_msg}")
|
||||
warnings.warn(
|
||||
f"{func_name} is deprecated and will be removed soon. {extra_msg}",
|
||||
FutureWarning,
|
||||
stacklevel=3,
|
||||
)
|
||||
|
||||
|
||||
def _validate_tp_mesh_dim(
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
import warnings
|
||||
from typing import Any, Dict, Optional, Tuple
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
from torch.distributions import constraints
|
||||
|
|
@ -171,14 +172,15 @@ class Distribution:
|
|||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@deprecated(
|
||||
"`sample_n(n)` will be deprecated. Use `sample((n,))` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def sample_n(self, n: int) -> torch.Tensor:
|
||||
"""
|
||||
Generates n samples or n batches of samples if the distribution
|
||||
parameters are batched.
|
||||
"""
|
||||
warnings.warn(
|
||||
"sample_n will be deprecated. Use .sample((n,)) instead", UserWarning
|
||||
)
|
||||
return self.sample(torch.Size((n,)))
|
||||
|
||||
def log_prob(self, value: torch.Tensor) -> torch.Tensor:
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
from warnings import warn
|
||||
import inspect
|
||||
from typing_extensions import deprecated
|
||||
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
|
||||
from .utils import expand_tuples
|
||||
from .variadic import Variadic, isvariadic
|
||||
|
|
@ -27,24 +28,21 @@ def ambiguity_warn(dispatcher, ambiguities):
|
|||
warn(warning_text(dispatcher.name, ambiguities), AmbiguityWarning)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`halt_ordering` is deprecated, you can safely remove this call.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def halt_ordering():
|
||||
"""Deprecated interface to temporarily disable ordering.
|
||||
"""
|
||||
warn(
|
||||
'halt_ordering is deprecated, you can safely remove this call.',
|
||||
DeprecationWarning,
|
||||
)
|
||||
"""Deprecated interface to temporarily disable ordering."""
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`restart_ordering` is deprecated, if you would like to eagerly order the dispatchers, "
|
||||
"you should call the `reorder()` method on each dispatcher.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def restart_ordering(on_ambiguity=ambiguity_warn):
|
||||
"""Deprecated interface to temporarily resume ordering.
|
||||
"""
|
||||
warn(
|
||||
'restart_ordering is deprecated, if you would like to eagerly order'
|
||||
'the dispatchers, you should call the ``reorder()`` method on each'
|
||||
' dispatcher.',
|
||||
DeprecationWarning,
|
||||
)
|
||||
"""Deprecated interface to temporarily resume ordering."""
|
||||
|
||||
|
||||
def variadic_signature_matches_iter(types, full_signature):
|
||||
|
|
@ -316,14 +314,12 @@ class Dispatcher:
|
|||
result = self.funcs[signature]
|
||||
yield result
|
||||
|
||||
@deprecated("`resolve()` is deprecated, use `dispatch(*types)`", category=FutureWarning)
|
||||
def resolve(self, types):
|
||||
""" Determine appropriate implementation for this type signature
|
||||
.. deprecated:: 0.4.4
|
||||
Use ``dispatch(*types)`` instead
|
||||
"""
|
||||
warn("resolve() is deprecated, use dispatch(*types)",
|
||||
DeprecationWarning)
|
||||
|
||||
return self.dispatch(*types)
|
||||
|
||||
def __getstate__(self):
|
||||
|
|
|
|||
10
torch/hub.py
|
|
@ -13,6 +13,7 @@ import warnings
|
|||
import zipfile
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional, Any
|
||||
from typing_extensions import deprecated
|
||||
from urllib.error import HTTPError, URLError
|
||||
from urllib.request import urlopen, Request
|
||||
from urllib.parse import urlparse # noqa: F401
|
||||
|
|
@ -680,10 +681,13 @@ def _is_legacy_zip_format(filename: str) -> bool:
|
|||
return False
|
||||
|
||||
|
||||
@deprecated(
|
||||
'Falling back to the old format < 1.6. This support will be '
|
||||
'deprecated in favor of default zipfile format introduced in 1.6. '
|
||||
'Please redo torch.save() to save it in the new zipfile format.',
|
||||
category=FutureWarning,
|
||||
)
|
||||
def _legacy_zip_load(filename: str, model_dir: str, map_location: MAP_LOCATION, weights_only: bool) -> Dict[str, Any]:
|
||||
warnings.warn('Falling back to the old format < 1.6. This support will be '
|
||||
'deprecated in favor of default zipfile format introduced in 1.6. '
|
||||
'Please redo torch.save() to save it in the new zipfile format.')
|
||||
# Note: extractall() defaults to overwrite file if exists. No need to clean up beforehand.
|
||||
# We deliberately don't handle tarfile here since our legacy serialization format was in tar.
|
||||
# E.g. resnet18-5c106cde.pth which is widely used.
|
||||
|
|
|
|||
|
|
@ -1094,7 +1094,10 @@ def _script_impl(
|
|||
|
||||
if optimize is not None:
|
||||
warnings.warn(
|
||||
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
|
||||
"`optimize` is deprecated and has no effect. "
|
||||
"Use `with torch.jit.optimized_execution()` instead",
|
||||
FutureWarning,
|
||||
stacklevel=3,
|
||||
)
|
||||
|
||||
# No-op for modules, functions, class instances that are already scripted
|
||||
|
|
|
|||
|
|
@ -978,7 +978,10 @@ def trace(
|
|||
return func
|
||||
if optimize is not None:
|
||||
warnings.warn(
|
||||
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
|
||||
"`optimize` is deprecated and has no effect. "
|
||||
"Use `with torch.jit.optimized_execution()` instead",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
from torch._utils_internal import (
|
||||
|
|
@ -1185,7 +1188,10 @@ def trace_module(
|
|||
return mod
|
||||
if optimize is not None:
|
||||
warnings.warn(
|
||||
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
|
||||
"`optimize` is deprecated and has no effect. "
|
||||
"Use `with torch.jit.optimized_execution()` instead",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
var_lookup_fn = _create_interpreter_name_lookup_fn(0)
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
from ._ops import OpOverload
|
||||
from typing import Any, Optional, Set, List, Union, Callable, Tuple, Dict, Sequence
|
||||
from typing_extensions import deprecated
|
||||
import traceback
|
||||
import torch
|
||||
import weakref
|
||||
|
|
@ -8,7 +9,6 @@ import inspect
|
|||
import re
|
||||
import contextlib
|
||||
import sys
|
||||
import warnings
|
||||
from torch._library.custom_ops import custom_op, _maybe_get_opdef, device_types_t, CustomOpDef
|
||||
import torch._library as _library
|
||||
|
||||
|
|
@ -451,15 +451,15 @@ def _(lib: Library, name, dispatch_key=""):
|
|||
return wrap
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.library.impl_abstract` was renamed to `torch.library.register_fake`. Please use that "
|
||||
"instead; we will remove `torch.library.impl_abstract` in a future version of PyTorch.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
|
||||
r"""This API was renamed to :func:`torch.library.register_fake` in PyTorch 2.4.
|
||||
Please use that instead.
|
||||
"""
|
||||
warnings.warn("torch.library.impl_abstract was renamed to "
|
||||
"torch.library.register_fake. Please use that instead; "
|
||||
"we will remove torch.library.impl_abstract in a future "
|
||||
"version of PyTorch.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
if func is not None:
|
||||
_stacklevel = _stacklevel + 1
|
||||
return register_fake(qualname, func, lib=lib, _stacklevel=_stacklevel)
|
||||
|
|
|
|||
|
|
@ -277,5 +277,5 @@ def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"):
|
|||
"To use a different start_method use:\n\t\t"
|
||||
" torch.multiprocessing.start_processes(...)"
|
||||
)
|
||||
warnings.warn(msg)
|
||||
warnings.warn(msg, FutureWarning, stacklevel=2)
|
||||
return start_processes(fn, args, nprocs, join, daemon, start_method="spawn")
|
||||
|
|
|
|||
|
|
@ -1818,7 +1818,8 @@ See :class:`~torch.nn.Softplus` for more details.
|
|||
|
||||
def _get_softmax_dim(name: str, ndim: int, stacklevel: int) -> int:
|
||||
warnings.warn(
|
||||
f"Implicit dimension choice for {name} has been deprecated. Change the call to include dim=X as an argument.",
|
||||
f"Implicit dimension choice for {name} has been deprecated. "
|
||||
"Change the call to include dim=X as an argument.",
|
||||
stacklevel=stacklevel,
|
||||
)
|
||||
if ndim == 0 or ndim == 1 or ndim == 3:
|
||||
|
|
@ -3823,7 +3824,11 @@ def upsample(input, size=None, scale_factor=None, mode="nearest", align_corners=
|
|||
affects the outputs.
|
||||
|
||||
"""
|
||||
warnings.warn("nn.functional.upsample is deprecated. Use nn.functional.interpolate instead.")
|
||||
warnings.warn(
|
||||
"`nn.functional.upsample` is deprecated. "
|
||||
"Use `nn.functional.interpolate` instead.",
|
||||
stacklevel=2,
|
||||
)
|
||||
return interpolate(input, size, scale_factor, mode, align_corners)
|
||||
|
||||
|
||||
|
|
@ -4143,7 +4148,11 @@ def upsample_nearest(input, size=None, scale_factor=None): # noqa: F811
|
|||
{backward_reproducibility_note}
|
||||
"""
|
||||
# DeprecationWarning is ignored by default
|
||||
warnings.warn("nn.functional.upsample_nearest is deprecated. Use nn.functional.interpolate instead.")
|
||||
warnings.warn(
|
||||
"`nn.functional.upsample_nearest` is deprecated. "
|
||||
"Use `nn.functional.interpolate` instead.",
|
||||
stacklevel=2,
|
||||
)
|
||||
return interpolate(input, size, scale_factor, mode="nearest")
|
||||
|
||||
|
||||
|
|
@ -4199,7 +4208,11 @@ def upsample_bilinear(input, size=None, scale_factor=None): # noqa: F811
|
|||
{backward_reproducibility_note}
|
||||
"""
|
||||
# DeprecationWarning is ignored by default
|
||||
warnings.warn("nn.functional.upsample_bilinear is deprecated. Use nn.functional.interpolate instead.")
|
||||
warnings.warn(
|
||||
"`nn.functional.upsample_bilinear` is deprecated. "
|
||||
"Use `nn.functional.interpolate` instead.",
|
||||
stacklevel=2,
|
||||
)
|
||||
return interpolate(input, size, scale_factor, mode="bilinear", align_corners=True)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -599,7 +599,11 @@ def _make_deprecate(meth):
|
|||
old_name = new_name[:-1]
|
||||
|
||||
def deprecated_init(*args, **kwargs):
|
||||
warnings.warn(f"nn.init.{old_name} is now deprecated in favor of nn.init.{new_name}.", stacklevel=2)
|
||||
warnings.warn(
|
||||
f"`nn.init.{old_name}` is now deprecated in favor of `nn.init.{new_name}`.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return meth(*args, **kwargs)
|
||||
|
||||
deprecated_init.__doc__ = fr"""
|
||||
|
|
|
|||
|
|
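The `nn.init` hunk touches a factory that mass-produces deprecated aliases for renamed initializers. A standalone sketch of that factory shape; `uniform_` below is a placeholder, not the real torch.nn.init function:

    import warnings
    from functools import wraps


    def uniform_(x, low=0.0, high=1.0):
        """New-style, trailing-underscore initializer stand-in."""
        return [low + (high - low) * 0.5 for _ in x]


    def _make_deprecate(meth):
        new_name = meth.__name__          # e.g. "uniform_"
        old_name = new_name[:-1]          # e.g. "uniform"

        @wraps(meth)
        def deprecated_init(*args, **kwargs):
            warnings.warn(
                f"`{old_name}` is now deprecated in favor of `{new_name}`.",
                FutureWarning,
                stacklevel=2,
            )
            return meth(*args, **kwargs)

        deprecated_init.__name__ = old_name
        return deprecated_init


    uniform = _make_deprecate(uniform_)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        uniform([0, 0, 0])

    assert caught[0].category is FutureWarning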
@ -219,10 +219,18 @@ class Hardtanh(Module):
|
|||
) -> None:
|
||||
super().__init__()
|
||||
if min_value is not None:
|
||||
warnings.warn("keyword argument min_value is deprecated and rename to min_val")
|
||||
warnings.warn(
|
||||
"keyword argument `min_value` is deprecated and rename to `min_val`",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
min_val = min_value
|
||||
if max_value is not None:
|
||||
warnings.warn("keyword argument max_value is deprecated and rename to max_val")
|
||||
warnings.warn(
|
||||
"keyword argument `max_value` is deprecated and rename to `max_val`",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
max_val = max_value
|
||||
|
||||
self.min_val = min_val
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
import warnings
|
||||
from collections import OrderedDict, abc as container_abcs
|
||||
from itertools import chain, islice
|
||||
import operator
|
||||
|
|
@ -10,6 +9,7 @@ from torch._jit_internal import _copy_to_script_wrapper
|
|||
|
||||
from typing import Any, Dict, Iterable, Iterator, Mapping, Optional, overload, Tuple, TypeVar, Union
|
||||
from typing_extensions import Self
|
||||
from typing_extensions import deprecated
|
||||
|
||||
__all__ = ['Container', 'Sequential', 'ModuleList', 'ModuleDict', 'ParameterList', 'ParameterDict']
|
||||
|
||||
|
|
@ -29,13 +29,14 @@ def _addindent(s_, numSpaces):
|
|||
return s
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`nn.Container` is deprecated. "
|
||||
"All of it's functionality is now implemented in `nn.Module`. Subclass that instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
class Container(Module):
|
||||
|
||||
def __init__(self, **kwargs: Any) -> None:
|
||||
super().__init__()
|
||||
# DeprecationWarning is ignored by default <sigh>
|
||||
warnings.warn("nn.Container is deprecated. All of it's functionality "
|
||||
"is now implemented in nn.Module. Subclass that instead.")
|
||||
for key, value in kwargs.items():
|
||||
self.add_module(key, value)
|
||||
|
||||
|
|
|
|||
|
|
@ -1,5 +1,4 @@
|
|||
import math
|
||||
import warnings
|
||||
|
||||
import torch
|
||||
from torch import Tensor
|
||||
|
|
@ -13,6 +12,7 @@ from torch._torch_docs import reproducibility_notes
|
|||
|
||||
from ..common_types import _size_1_t, _size_2_t, _size_3_t
|
||||
from typing import Optional, List, Tuple, Union
|
||||
from typing_extensions import deprecated
|
||||
|
||||
__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d',
|
||||
'LazyConv1d', 'LazyConv2d', 'LazyConv3d', 'LazyConvTranspose1d', 'LazyConvTranspose2d',
|
||||
|
|
@ -40,9 +40,6 @@ convolution_notes = \
|
|||
:math:`(C_\text{in}=C_\text{in}, C_\text{out}=C_\text{in} \times \text{K}, ..., \text{groups}=C_\text{in})`."""} # noqa: B950
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class _ConvNd(Module):
|
||||
|
||||
__constants__ = ['stride', 'padding', 'dilation', 'groups',
|
||||
|
|
@ -610,7 +607,6 @@ class Conv3d(_ConvNd):
|
|||
return self._conv_forward(input, self.weight, self.bias)
|
||||
|
||||
|
||||
|
||||
class _ConvTransposeNd(_ConvNd):
|
||||
def __init__(self, in_channels, out_channels, kernel_size, stride,
|
||||
padding, dilation, transposed, output_padding,
|
||||
|
|
@ -1121,10 +1117,13 @@ class ConvTranspose3d(_ConvTransposeNd):
|
|||
# `_ConvTransposeNd` is really not a mixin anymore (but multiple inheritance as
|
||||
# above would still work).
|
||||
class _ConvTransposeMixin(_ConvTransposeNd):
|
||||
|
||||
@deprecated(
|
||||
"`_ConvTransposeMixin` is a deprecated internal class. "
|
||||
"Please consider using public APIs.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def __init__(self, *args, **kwargs):
|
||||
warnings.warn(
|
||||
"_ConvTransposeMixin is a deprecated internal class. "
|
||||
"Please consider using public APIs.")
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,5 +1,3 @@
|
|||
import warnings
|
||||
|
||||
from .distance import PairwiseDistance
|
||||
from .module import Module
|
||||
from .. import functional as F
|
||||
|
|
@ -7,6 +5,7 @@ from .. import _reduction as _Reduction
|
|||
|
||||
from torch import Tensor
|
||||
from typing import Callable, Optional
|
||||
from typing_extensions import deprecated
|
||||
|
||||
__all__ = ['L1Loss', 'NLLLoss', 'NLLLoss2d', 'PoissonNLLLoss', 'GaussianNLLLoss', 'KLDivLoss',
|
||||
'MSELoss', 'BCELoss', 'BCEWithLogitsLoss', 'HingeEmbeddingLoss', 'MultiLabelMarginLoss',
|
||||
|
|
@ -218,12 +217,15 @@ class NLLLoss(_WeightedLoss):
|
|||
return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`NLLLoss2d` has been deprecated. "
|
||||
"Please use `NLLLoss` instead as a drop-in replacement and see "
|
||||
"https://pytorch.org/docs/main/nn.html#torch.nn.NLLLoss for more details.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
class NLLLoss2d(NLLLoss):
|
||||
def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
|
||||
reduce=None, reduction: str = 'mean') -> None:
|
||||
warnings.warn("NLLLoss2d has been deprecated. "
|
||||
"Please use NLLLoss instead as a drop-in replacement and see "
|
||||
"https://pytorch.org/docs/main/nn.html#torch.nn.NLLLoss for more details.")
|
||||
super().__init__(weight, size_average, ignore_index, reduce, reduction)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
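`NLLLoss2d` (like `nn.Container` earlier) shows the class form: the decorator goes on the class, the warning fires at instantiation, and the old constructor body no longer needs its own `warnings.warn`. A toy version, assuming typing_extensions >= 4.5; `OldLoss2d` and `NewLoss` are invented:

    import warnings
    from typing_extensions import deprecated


    class NewLoss:
        def __call__(self, prediction, target):
            return sum((p - t) ** 2 for p, t in zip(prediction, target))


    @deprecated(
        "`OldLoss2d` has been deprecated. Please use `NewLoss` instead as a drop-in replacement.",
        category=FutureWarning,
    )
    class OldLoss2d(NewLoss):
        pass


    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        loss = OldLoss2d()          # the warning is emitted here, at instantiation
        value = loss([1.0, 2.0], [1.0, 1.0])

    assert value == 1.0 and caught[0].category is FutureWarning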
@ -1335,20 +1335,28 @@ class Module:
|
|||
def _maybe_warn_non_full_backward_hook(self, inputs, result, grad_fn):
|
||||
if not isinstance(result, torch.Tensor):
|
||||
if not (isinstance(result, tuple) and all(isinstance(r, torch.Tensor) for r in result)):
|
||||
warnings.warn("Using non-full backward hooks on a Module that does not return a "
|
||||
"single Tensor or a tuple of Tensors is deprecated and will be removed "
|
||||
"in future versions. This hook will be missing some of the grad_output. "
|
||||
"Please use register_full_backward_hook to get the documented behavior.")
|
||||
warnings.warn(
|
||||
"Using non-full backward hooks on a Module that does not return a "
|
||||
"single Tensor or a tuple of Tensors is deprecated and will be removed "
|
||||
"in future versions. This hook will be missing some of the grad_output. "
|
||||
"Please use register_full_backward_hook to get the documented behavior.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return
|
||||
else:
|
||||
result = (result,)
|
||||
|
||||
if not isinstance(inputs, torch.Tensor):
|
||||
if not (isinstance(inputs, tuple) and all(isinstance(i, torch.Tensor) for i in inputs)):
|
||||
warnings.warn("Using non-full backward hooks on a Module that does not take as input a "
|
||||
"single Tensor or a tuple of Tensors is deprecated and will be removed "
|
||||
"in future versions. This hook will be missing some of the grad_input. "
|
||||
"Please use register_full_backward_hook to get the documented behavior.")
|
||||
warnings.warn(
|
||||
"Using non-full backward hooks on a Module that does not take as input a "
|
||||
"single Tensor or a tuple of Tensors is deprecated and will be removed "
|
||||
"in future versions. This hook will be missing some of the grad_input. "
|
||||
"Please use register_full_backward_hook to get the documented behavior.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return
|
||||
else:
|
||||
inputs = (inputs,)
|
||||
|
|
@ -1356,13 +1364,21 @@ class Module:
|
|||
# At this point we are sure that inputs and result are tuple of Tensors
|
||||
out_grad_fn = {r.grad_fn for r in result if r.grad_fn is not None}
|
||||
if len(out_grad_fn) == 0 or (len(out_grad_fn) == 1 and grad_fn not in out_grad_fn):
|
||||
warnings.warn("Using a non-full backward hook when outputs are nested in python data structure "
|
||||
"is deprecated and will be removed in future versions. This hook will be missing "
|
||||
"some grad_output.")
|
||||
warnings.warn(
|
||||
"Using a non-full backward hook when outputs are nested in python data structure "
|
||||
"is deprecated and will be removed in future versions. This hook will be missing "
|
||||
"some grad_output.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
elif len(out_grad_fn) > 1:
|
||||
warnings.warn("Using a non-full backward hook when outputs are generated by different autograd Nodes "
|
||||
"is deprecated and will be removed in future versions. This hook will be missing "
|
||||
"some grad_output. Please use register_full_backward_hook to get the documented behavior.")
|
||||
warnings.warn(
|
||||
"Using a non-full backward hook when outputs are generated by different autograd Nodes "
|
||||
"is deprecated and will be removed in future versions. This hook will be missing "
|
||||
"some grad_output. Please use register_full_backward_hook to get the documented behavior.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
else:
|
||||
# At this point the grad_output part of the hook will most likely be correct
|
||||
inputs_grad_fn = {i.grad_fn for i in inputs if i.grad_fn is not None}
|
||||
|
|
@ -1370,10 +1386,14 @@ class Module:
|
|||
next_functions = {n[0] for n in grad_fn.next_functions}
|
||||
|
||||
if inputs_grad_fn != next_functions:
|
||||
warnings.warn("Using a non-full backward hook when the forward contains multiple autograd Nodes "
|
||||
"is deprecated and will be removed in future versions. This hook will be missing "
|
||||
"some grad_input. Please use register_full_backward_hook to get the documented "
|
||||
"behavior.")
|
||||
warnings.warn(
|
||||
"Using a non-full backward hook when the forward contains multiple autograd Nodes "
|
||||
"is deprecated and will be removed in future versions. This hook will be missing "
|
||||
"some grad_input. Please use register_full_backward_hook to get the documented "
|
||||
"behavior.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
def register_forward_pre_hook(
|
||||
self,
|
||||
|
|
@ -1887,17 +1907,20 @@ class Module:
|
|||
"""
|
||||
# TODO: Remove `args` and the parsing logic when BC allows.
|
||||
if len(args) > 0:
|
||||
# DeprecationWarning is ignored by default
|
||||
warnings.warn(
|
||||
"Positional args are being deprecated, use kwargs instead. Refer to "
|
||||
"https://pytorch.org/docs/main/generated/torch.nn.Module.html#torch.nn.Module.state_dict"
|
||||
" for details.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
if destination is None:
|
||||
destination = args[0]
|
||||
if len(args) > 1 and prefix == '':
|
||||
prefix = args[1]
|
||||
if len(args) > 2 and keep_vars is False:
|
||||
keep_vars = args[2]
|
||||
# DeprecationWarning is ignored by default
|
||||
warnings.warn(
|
||||
"Positional args are being deprecated, use kwargs instead. Refer to "
|
||||
"https://pytorch.org/docs/main/generated/torch.nn.Module.html#torch.nn.Module.state_dict"
|
||||
" for details.")
|
||||
|
||||
if destination is None:
|
||||
destination = OrderedDict()
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ import warnings
|
|||
import numbers
|
||||
import weakref
|
||||
from typing import List, Tuple, Optional, overload
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
from torch import Tensor
|
||||
|
|
@ -24,8 +25,11 @@ def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Ten
|
|||
return tensor.index_select(dim, permutation)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`apply_permutation` is deprecated, please use `tensor.index_select(dim, permutation)` instead",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
|
||||
warnings.warn("apply_permutation is deprecated, please use tensor.index_select(dim, permutation) instead")
|
||||
return _apply_permutation(tensor, permutation, dim)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
from typing_extensions import deprecated
|
||||
|
||||
from .parallel_apply import parallel_apply
|
||||
from .replicate import replicate
|
||||
from .data_parallel import DataParallel, data_parallel
|
||||
|
|
@ -7,8 +9,11 @@ from .distributed import DistributedDataParallel
|
|||
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
|
||||
'DataParallel', 'DistributedDataParallel']
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.nn.parallel.DistributedDataParallelCPU` is deprecated, "
|
||||
"please use `torch.nn.parallel.DistributedDataParallel` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def DistributedDataParallelCPU(*args, **kwargs):
|
||||
import warnings
|
||||
warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, "
|
||||
"please use torch.nn.parallel.DistributedDataParallel instead.")
|
||||
return DistributedDataParallel(*args, **kwargs)
|
||||
|
|
|
|||
|
|
@ -226,7 +226,10 @@ def gather(tensors, dim=0, destination=None, *, out=None):
|
|||
if destination == -1:
|
||||
warnings.warn(
|
||||
'Using -1 to represent CPU tensor is deprecated. Please use a '
|
||||
'device object or string instead, e.g., "cpu".')
|
||||
'device object or string instead, e.g., "cpu".',
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
destination = _get_device_index(destination, allow_cpu=True, optional=True)
|
||||
return torch._C._gather(tensors, dim, destination)
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -771,7 +771,9 @@ class DistributedDataParallel(Module, Joinable):
|
|||
# do not receive gradients.
|
||||
warnings.warn(
|
||||
"The `check_reduction` argument in `DistributedDataParallel` "
|
||||
"module is deprecated. Please avoid using it."
|
||||
"module is deprecated. Please avoid using it.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
# Check that a module does not have Uninitialized parameters
|
||||
|
|
|
|||
|
|
@ -1,13 +1,17 @@
|
|||
import torch
|
||||
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union, overload
|
||||
from typing_extensions import deprecated
|
||||
from ._functions import Scatter, Gather
|
||||
import warnings
|
||||
|
||||
__all__ = ['scatter', 'scatter_kwargs', 'gather']
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`is_namedtuple` is deprecated, please use the python checks instead",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def is_namedtuple(obj: Any) -> bool:
|
||||
# Check if type was created from collections.namedtuple or a typing.NamedTuple.
|
||||
warnings.warn("is_namedtuple is deprecated, please use the python checks instead")
|
||||
return _is_namedtuple(obj)
|
||||
|
||||
def _is_namedtuple(obj: Any) -> bool:
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
import warnings
|
||||
import functools
|
||||
from typing import Union, Iterable, List, Dict, Tuple, Optional, cast
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
from torch import Tensor
|
||||
|
|
@ -99,6 +99,11 @@ def clip_grad_norm_(
|
|||
return total_norm
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.nn.utils.clip_grad_norm` is now deprecated "
|
||||
"in favor of `torch.nn.utils.clip_grad_norm_`.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def clip_grad_norm(
|
||||
parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.,
|
||||
error_if_nonfinite: bool = False, foreach: Optional[bool] = None) -> torch.Tensor:
|
||||
|
|
@ -108,8 +113,6 @@ def clip_grad_norm(
|
|||
This method is now deprecated in favor of
|
||||
:func:`torch.nn.utils.clip_grad_norm_`.
|
||||
"""
|
||||
warnings.warn("torch.nn.utils.clip_grad_norm is now deprecated in favor "
|
||||
"of torch.nn.utils.clip_grad_norm_.", stacklevel=2)
|
||||
return clip_grad_norm_(parameters, max_norm, norm_type, error_if_nonfinite, foreach)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
import contextlib
|
||||
import warnings
|
||||
from collections import defaultdict
|
||||
from typing import Any, Dict, Iterator, Optional, Set, Tuple, Union
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
from torch import Tensor
|
||||
|
|
@ -148,6 +148,12 @@ def _reparametrize_module(
|
|||
)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.nn.utils.stateless.functional_call` is deprecated as of PyTorch 2.0 "
|
||||
"and will be removed in a future version of PyTorch. "
|
||||
"Please use `torch.func.functional_call` instead which is a drop-in replacement.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def functional_call(
|
||||
module: "torch.nn.Module",
|
||||
parameters_and_buffers: Dict[str, Tensor],
|
||||
|
|
@ -216,12 +222,6 @@ def functional_call(
|
|||
Returns:
|
||||
Any: the result of calling ``module``.
|
||||
"""
|
||||
warnings.warn(
|
||||
"This API is deprecated as of PyTorch 2.0 and will be removed in a future "
|
||||
"version of PyTorch. Please use torch.func.functional_call instead "
|
||||
"which is a drop-in replacement for this API."
|
||||
)
|
||||
|
||||
return _functional_call(
|
||||
module,
|
||||
parameters_and_buffers,
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ r"""Weight Normalization from https://arxiv.org/abs/1602.07868."""
|
|||
from torch.nn.parameter import Parameter, UninitializedParameter
|
||||
from torch import _weight_norm, norm_except_dim
|
||||
from typing import Any, TypeVar
|
||||
import warnings
|
||||
from typing_extensions import deprecated
|
||||
from ..modules import Module
|
||||
|
||||
__all__ = ['WeightNorm', 'weight_norm', 'remove_weight_norm']
|
||||
|
|
@ -24,9 +24,12 @@ class WeightNorm:
|
|||
return _weight_norm(v, g, self.dim)
|
||||
|
||||
@staticmethod
|
||||
@deprecated(
|
||||
"`torch.nn.utils.weight_norm` is deprecated "
|
||||
"in favor of `torch.nn.utils.parametrizations.weight_norm`.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def apply(module, name: str, dim: int) -> 'WeightNorm':
|
||||
warnings.warn("torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.")
|
||||
|
||||
for hook in module._forward_pre_hooks.values():
|
||||
if isinstance(hook, WeightNorm) and hook.name == name:
|
||||
raise RuntimeError(f"Cannot register two weight_norm hooks on the same parameter {name}")
|
||||
|
|
|
|||
|
|
@ -598,7 +598,11 @@ class profile(_KinetoProfile):
|
|||
):
|
||||
activities_set = set(activities) if activities else supported_activities()
|
||||
if use_cuda is not None:
|
||||
warn("use_cuda is deprecated, use activities argument instead")
|
||||
warn(
|
||||
"`use_cuda` is deprecated, use `activities` argument instead",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
if use_cuda:
|
||||
activities_set.add(ProfilerActivity.CUDA)
|
||||
elif ProfilerActivity.CUDA in activities_set:
|
||||
|
|
|
|||
|
|
@ -359,9 +359,12 @@ def to_sparse_semi_structured(
|
|||
[-4370, -4370, -4370, ..., -4370, -4370, -4370]], device='cuda:0', dtype=torch.int16))
|
||||
"""
|
||||
if transposed:
|
||||
raise DeprecationWarning(
|
||||
"Setting transpose from to_sparse_semi_structured is deprecated and will be removed in a future release."
|
||||
"SparseSemiStructuredTensor only support contiguous input tensors. "
|
||||
warnings.warn(
|
||||
"Setting transpose from `to_sparse_semi_structured` is deprecated "
|
||||
"and will be removed in a future release. "
|
||||
"`SparseSemiStructuredTensor` only support contiguous input tensors.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
# set from _FORCE_CUTLASS flag
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@ import abc
|
|||
import cmath
|
||||
import collections.abc
|
||||
import contextlib
|
||||
import warnings
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
|
|
@ -16,6 +15,7 @@ from typing import (
|
|||
Type,
|
||||
Union,
|
||||
)
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
|
||||
|
|
@ -1523,6 +1523,12 @@ def assert_close(
|
|||
raise error_metas[0].to_error(msg)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.testing.assert_allclose()` is deprecated since 1.12 and will be removed in a future release. "
|
||||
"Please use `torch.testing.assert_close()` instead. "
|
||||
"You can find detailed upgrade instructions in https://github.com/pytorch/pytorch/issues/61844.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def assert_allclose(
|
||||
actual: Any,
|
||||
expected: Any,
|
||||
|
|
@ -1538,14 +1544,6 @@ def assert_allclose(
|
|||
Please use :func:`torch.testing.assert_close` instead. You can find detailed upgrade instructions
|
||||
`here <https://github.com/pytorch/pytorch/issues/61844>`_.
|
||||
"""
|
||||
warnings.warn(
|
||||
"`torch.testing.assert_allclose()` is deprecated since 1.12 and will be removed in a future release. "
|
||||
"Please use `torch.testing.assert_close()` instead. "
|
||||
"You can find detailed upgrade instructions in https://github.com/pytorch/pytorch/issues/61844.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
if not isinstance(actual, torch.Tensor):
|
||||
actual = torch.tensor(actual)
|
||||
if not isinstance(expected, torch.Tensor):
|
||||
|
|
|
|||
|
|
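Once a warning's category changes to `FutureWarning`, the corresponding tests have to assert the new category. A minimal, self-contained check using unittest's `assertWarnsRegex`; the function under test is a stand-in, not a torch API:

    import unittest
    import warnings


    def legacy_allclose(a, b):
        warnings.warn(
            "`legacy_allclose()` is deprecated, please use `assert_close()` instead.",
            FutureWarning,
            stacklevel=2,
        )
        return abs(a - b) < 1e-6


    class TestDeprecationCategory(unittest.TestCase):
        def test_warns_future_warning(self):
            # assertWarnsRegex checks both the warning category and the message text.
            with self.assertWarnsRegex(FutureWarning, "deprecated"):
                self.assertTrue(legacy_allclose(1.0, 1.0))


    if __name__ == "__main__":
        unittest.main()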
@@ -150,8 +150,9 @@ def make_tensor(
         warnings.warn(
             "Passing `low==high` to `torch.testing.make_tensor` for floating or complex types "
             "is deprecated since 2.1 and will be removed in 2.3. "
-            "Use torch.full(...) instead.",
+            "Use `torch.full(...)` instead.",
+            FutureWarning,
             stacklevel=3,
         )
     elif low >= high:
         raise ValueError(f"`low` must be less than `high`, but got {low} >= {high}")

@@ -7,9 +7,9 @@ import io
 import pickle
 import tokenize
 import unittest
-import warnings
 from types import FunctionType, ModuleType
 from typing import Any, Dict, Optional, Set, Union
+from typing_extensions import deprecated
 from unittest import mock

 # Types saved/loaded in configs

@@ -196,12 +196,12 @@ class ConfigModule(ModuleType):
         self._is_dirty = False
         return self._hash_digest

+    @deprecated(
+        "`config.to_dict()` has been deprecated. It may no longer change the underlying config."
+        " use `config.shallow_copy_dict()` or `config.get_config_copy()` instead",
+        category=FutureWarning,
+    )
     def to_dict(self) -> Dict[str, Any]:
-        warnings.warn(
-            "config.to_dict() has been deprecated. It may no longer change the underlying config."
-            " use config.shallow_copy_dict() or config.get_config_copy() instead",
-            DeprecationWarning,
-        )
         return self.shallow_copy_dict()

     def shallow_copy_dict(self) -> Dict[str, Any]:

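The category switch in hunks like this one is not cosmetic: Python's default warning filters hide `DeprecationWarning` unless it is triggered from `__main__`, while `FutureWarning` is shown by default, so downstream users actually see the notice. A quick sketch with hypothetical functions:

    import warnings

    def lib_old_api():
        # Hidden by default when emitted from library (non-__main__) code.
        warnings.warn("lib_old_api is deprecated", DeprecationWarning, stacklevel=2)

    def lib_old_api_visible():
        # FutureWarning is not filtered out by default, so callers see it.
        warnings.warn("lib_old_api is deprecated", FutureWarning, stacklevel=2)
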
@@ -122,10 +122,14 @@ class _DecoratorContextManager:

     def __call__(self, orig_func: F) -> F:
         if inspect.isclass(orig_func):
-            warnings.warn("Decorating classes is deprecated and will be disabled in "
-                          "future versions. You should only decorate functions or methods. "
-                          "To preserve the current behavior of class decoration, you can "
-                          "directly decorate the `__init__` method and nothing else.")
+            warnings.warn(
+                "Decorating classes is deprecated and will be disabled in "
+                "future versions. You should only decorate functions or methods. "
+                "To preserve the current behavior of class decoration, you can "
+                "directly decorate the `__init__` method and nothing else.",
+                FutureWarning,
+                stacklevel=2,
+            )
             func = cast(F, lambda *args, **kwargs: orig_func(*args, **kwargs))
         else:
             func = orig_func

@@ -15,7 +15,6 @@ collection support for PyTorch APIs.
 import functools
 import sys
 import types
-import warnings
 from typing import (
     Any,
     Callable,

@@ -28,6 +27,7 @@ from typing import (
     TypeVar,
     Union,
 )
+from typing_extensions import deprecated

 import torch


@@ -167,6 +167,11 @@ def register_pytree_node(
     )


+@deprecated(
+    "`torch.utils._cxx_pytree._register_pytree_node` is deprecated. "
+    "Please use `torch.utils._cxx_pytree.register_pytree_node` instead.",
+    category=FutureWarning,
+)
 def _register_pytree_node(
     cls: Type[Any],
     flatten_fn: FlattenFunc,

@@ -207,11 +212,6 @@ def _register_pytree_node(
         original context. This is used for json deserialization, which is being used in
         :mod:`torch.export` right now.
     """
-    warnings.warn(
-        "torch.utils._cxx_pytree._register_pytree_node is deprecated. "
-        "Please use torch.utils._cxx_pytree.register_pytree_node instead.",
-        stacklevel=2,
-    )

     _private_register_pytree_node(
         cls,

@@ -48,6 +48,7 @@ from typing import (
     TypeVar,
     Union,
 )
+from typing_extensions import deprecated


 __all__ = [

@@ -251,6 +252,11 @@ def _register_namedtuple(
     )


+@deprecated(
+    "`torch.utils._pytree._register_pytree_node` is deprecated. "
+    "Please use `torch.utils._pytree.register_pytree_node` instead.",
+    category=FutureWarning,
+)
 def _register_pytree_node(
     cls: Type[Any],
     flatten_fn: FlattenFunc,

@@ -287,16 +293,12 @@ def _register_pytree_node(
         Like ``flatten_fn``, but in place of a List[leaf], it should return
         a List[(keypath, leaf)].
     """
-    warnings.warn(
-        "torch.utils._pytree._register_pytree_node is deprecated. "
-        "Please use torch.utils._pytree.register_pytree_node instead.",
-        stacklevel=2,
-    )
-
     if to_str_fn is not None or maybe_from_str_fn is not None:
         warnings.warn(
-            "to_str_fn and maybe_from_str_fn is deprecated. "
-            "Please use to_dumpable_context and from_dumpable_context instead."
+            "`to_str_fn` and `maybe_from_str_fn` is deprecated. "
+            "Please use `to_dumpable_context` and `from_dumpable_context` instead.",
+            FutureWarning,
+            stacklevel=2,
         )

     _private_register_pytree_node(

@@ -1451,14 +1453,20 @@ def treespec_pprint(treespec: TreeSpec) -> str:


 # TODO(angelayi): remove this function after OSS/internal stabilize
+@deprecated(
+    "`pytree_to_str` is deprecated. Please use `treespec_dumps` instead.",
+    category=FutureWarning,
+)
 def pytree_to_str(treespec: TreeSpec) -> str:
-    warnings.warn("pytree_to_str is deprecated. Please use treespec_dumps")
     return treespec_dumps(treespec)


 # TODO(angelayi): remove this function after OSS/internal stabilize
+@deprecated(
+    "`str_to_pytree` is deprecated. Please use `treespec_loads` instead.",
+    category=FutureWarning,
+)
 def str_to_pytree(json: str) -> TreeSpec:
-    warnings.warn("str_to_pytree is deprecated. Please use treespec_loads")
     return treespec_loads(json)


@@ -1,5 +1,10 @@
-import warnings
+from typing_extensions import deprecated as _deprecated

+
+@_deprecated(
+    "Usage of `backward_compatibility.worker_init_fn` is deprecated "
+    "as `DataLoader` automatically applies sharding in every worker",
+    category=FutureWarning,
+)
 def worker_init_fn(worker_id):
-    warnings.warn("Usage of backward_compatibility.worker_init_fn is deprecated"
-                  " as DataLoader automatically applies sharding in every worker")
+    pass

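A small detail worth noting in the hunk above: the decorator is imported as `_deprecated` so it does not become part of the module's public namespace. A minimal sketch with a made-up `legacy_init` function:

    # The leading-underscore alias keeps the helper out of `from module import *`
    # and out of the documented API surface.
    from typing_extensions import deprecated as _deprecated

    @_deprecated(
        "`legacy_init` is deprecated; it is now a no-op.",
        category=FutureWarning,
    )
    def legacy_init(worker_id):
        pass
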
@@ -14,6 +14,7 @@ from typing import (
     TypeVar,
     Union,
 )
+from typing_extensions import deprecated

 # No 'default_generator' in torch/__init__.pyi
 from torch import default_generator, randperm

@@ -348,12 +349,11 @@ class ConcatDataset(Dataset[T_co]):
         return self.datasets[dataset_idx][sample_idx]

     @property
+    @deprecated(
+        "`cummulative_sizes` attribute is renamed to `cumulative_sizes`",
+        category=FutureWarning,
+    )
     def cummulative_sizes(self):
-        warnings.warn(
-            "cummulative_sizes attribute is renamed to " "cumulative_sizes",
-            DeprecationWarning,
-            stacklevel=2,
-        )
         return self.cumulative_sizes


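Decorator order matters in the hunk above: `@deprecated` wraps the getter and `@property` wraps the result, so simply reading the attribute emits the `FutureWarning`. A toy sketch mirroring that ordering (the class and values here are made up):

    from typing_extensions import deprecated

    class ToyConcat:
        def __init__(self):
            self.cumulative_sizes = [3, 7, 10]

        @property
        @deprecated(
            "`cummulative_sizes` is renamed to `cumulative_sizes`",
            category=FutureWarning,
        )
        def cummulative_sizes(self):
            # The wrapped getter warns, then returns the renamed attribute.
            return self.cumulative_sizes

    ToyConcat().cummulative_sizes  # attribute access warns, then returns [3, 7, 10]
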
@@ -2,6 +2,7 @@ import inspect
 import warnings

 from typing import Any, List, Optional, Set
+from typing_extensions import deprecated

 import torch


@@ -116,11 +117,12 @@ def apply_shuffle_settings(datapipe: DataPipe, shuffle: Optional[bool] = None) -
     return datapipe


+@deprecated(
+    "`apply_shuffle_seed` is deprecated since 1.12 and will be removed in the future releases. "
+    "Please use `apply_random_seed` instead.",
+    category=FutureWarning,
+)
 def apply_shuffle_seed(datapipe: DataPipe, rng: Any) -> DataPipe:
-    warnings.warn(
-        "`apply_shuffle_seed` is deprecated since 1.12 and will be removed in the future releases."
-        "\nPlease use `apply_random_seed` instead."
-    )
     return apply_random_seed(datapipe, rng)

