Revert "[BE] wrap deprecated function/class with typing_extensions.deprecated (#126898)"
This reverts commit 749a132fb0.
Reverted https://github.com/pytorch/pytorch/pull/126898 on behalf of https://github.com/fbgheith because switching typing-extensions from 4.3.0 to 4.9.0 causes an internal failure ([comment](https://github.com/pytorch/pytorch/pull/126898#issuecomment-2142884456))
parent ea13e9a097
commit 033e733021
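For context, a minimal, hypothetical sketch of the two deprecation styles this revert toggles between is shown below. The reverted PR wrapped APIs with `typing_extensions.deprecated` (available only in typing-extensions >= 4.5, hence the pin bump to 4.9.0) emitting a `FutureWarning`, while the restored code calls `warnings.warn` inside the function body, which defaults to `UserWarning`. The `old_api_*` and `new_api` names are placeholders, not PyTorch functions.

```python
# Hypothetical sketch (not PyTorch code): the two deprecation styles this revert toggles.
import warnings

from typing_extensions import deprecated  # assumes typing-extensions >= 4.5


def new_api() -> int:
    return 42


# Style removed by the revert: the decorator emits the warning on each call and
# also marks the function as deprecated for static type checkers (PEP 702).
@deprecated("`old_api_decorated` is deprecated, use `new_api` instead.", category=FutureWarning)
def old_api_decorated() -> int:
    return new_api()


# Style restored by the revert: an explicit warnings.warn in the body, which only
# fires at runtime and works with older typing-extensions releases.
def old_api_manual() -> int:
    warnings.warn("old_api_manual is deprecated, use new_api instead.")
    return new_api()


if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        old_api_decorated()
        old_api_manual()
    # The decorated call raises FutureWarning; the manual call defaults to UserWarning,
    # which is why the tests in this diff switch assertWarnsRegex back to UserWarning.
    print([type(w.message).__name__ for w in caught])  # ['FutureWarning', 'UserWarning']
```

This also explains the warning-category churn throughout the diff: with the decorator gone, each call site falls back to whatever category the original `warnings.warn` call used.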
.github/requirements/conda-env-Linux-X64.txt (vendored, 2 lines changed)
@@ -6,4 +6,4 @@ numpy=1.23.3
 pyyaml=6.0
 requests=2.31.0
 setuptools=68.2.2
-typing-extensions=4.9.0
+typing-extensions=4.3.0
.github/requirements/conda-env-iOS.txt (vendored, 2 lines changed)
@@ -5,4 +5,4 @@ numpy=1.23.3
 pyyaml=6.0
 requests=2.31.0
 setuptools=68.2.2
-typing-extensions=4.9.0
+typing-extensions=4.3.0
.github/requirements/conda-env-macOS-ARM64 (vendored, 2 lines changed)
@@ -2,7 +2,7 @@ numpy=1.22.3
 pyyaml=6.0
 setuptools=61.2.0
 cmake=3.22.*
-typing-extensions=4.9.0
+typing-extensions=4.3.0
 dataclasses=0.8
 pip=22.2.2
 pillow=10.0.1
.github/requirements/conda-env-macOS-X64 (vendored, 2 lines changed)
@@ -4,7 +4,7 @@ numpy=1.21.2
 pyyaml=5.3
 setuptools=46.0.0
 cmake=3.22.*
-typing-extensions=4.9.0
+typing-extensions=4.3.0
 dataclasses=0.8
 pip=22.2.2
 pillow=10.0.1
@@ -237,7 +237,7 @@ class DTensorAPITest(DTensorTestBase):
 assert isinstance(outputs, DTensor)
 return outputs.to_local()

-with self.assertWarnsRegex(FutureWarning, "Deprecating"):
+with self.assertWarnsRegex(UserWarning, "Deprecating"):
     replica_module = distribute_module(
         module_to_replicate,
         device_mesh,
@@ -1436,7 +1436,7 @@ class TestFSDPOptimState(FSDPTest):
 def get_warning_context():
     warning_regex = "`optim_input` argument is deprecated"
     return self.assertWarnsRegex(
-        expected_warning=FutureWarning, expected_regex=warning_regex
+        expected_warning=UserWarning, expected_regex=warning_regex
     )

 self._run_on_all_optim_state_apis(
@@ -3258,7 +3258,7 @@ class TestComposability(TestCase):
 x = torch.randn(3, device=device)

 # functorch version of the API is deprecated
-with self.assertWarnsRegex(FutureWarning, "Please use `torch.vmap`"):
+with self.assertWarnsRegex(UserWarning, "Please use torch.vmap"):
     vmap(torch.sin)

 # the non-functorch version is not deprecated
@@ -3276,9 +3276,7 @@
 new_api = getattr(torch.func, transform)

 # functorch version of the API is deprecated
-with self.assertWarnsRegex(
-    FutureWarning, f"Please use `torch.func.{transform}`"
-):
+with self.assertWarnsRegex(UserWarning, f"Please use torch.func.{transform}"):
     api(torch.sin)

 # the non-functorch version is not deprecated
@@ -521,7 +521,7 @@ class TestNNInit(TestCase):
 init.normal(x)

 with self.assertWarnsRegex(
-    FutureWarning,
+    UserWarning,
     "deprecated",
     msg="methods not suffixed with underscore should be deprecated",
 ):
@@ -1387,8 +1387,7 @@ class TestModuleHookNN(NNTestCase):
 m.register_backward_hook(noop)

 with self.assertWarnsRegex(
-    FutureWarning,
-    "does not take as input a single Tensor or a tuple of Tensors",
+    UserWarning, "does not take as input a single Tensor or a tuple of Tensors"
 ):
     m([a, b])

@@ -1401,7 +1400,7 @@ class TestModuleHookNN(NNTestCase):
 m.register_backward_hook(noop)

 with self.assertWarnsRegex(
-    FutureWarning, "does not return a single Tensor or a tuple of Tensors"
+    UserWarning, "does not return a single Tensor or a tuple of Tensors"
 ):
     m(a, b)

@@ -1414,7 +1413,7 @@ class TestModuleHookNN(NNTestCase):
 m.register_backward_hook(noop)

 with self.assertWarnsRegex(
-    FutureWarning, "outputs are generated by different autograd Nodes"
+    UserWarning, "outputs are generated by different autograd Nodes"
 ):
     m(a, b)

@@ -1427,7 +1426,7 @@ class TestModuleHookNN(NNTestCase):
 m.register_backward_hook(noop)

 with self.assertWarnsRegex(
-    FutureWarning, "the forward contains multiple autograd Nodes"
+    UserWarning, "the forward contains multiple autograd Nodes"
 ):
     m(a)

@@ -255,8 +255,8 @@ class TestAutocastCPU(TestCase):

 def test_cpu_autocast_deprecated_warning(self):
     with self.assertWarnsRegex(
-        FutureWarning,
-        r"`torch.cpu.amp.autocast\(args...\)` is deprecated. Please use `torch.amp.autocast\('cpu', args...\)` instead.",
+        DeprecationWarning,
+        r"torch.cpu.amp.autocast\(args...\) is deprecated. Please use torch.amp.autocast\('cpu', args...\) instead.",
     ):
         with torch.cpu.amp.autocast():
             _ = torch.ones(10)
@@ -154,7 +154,7 @@ class TestAutograd(TestCase):

 def test_grad_mode_class_decoration(self):
     # Decorating class is deprecated and should not be used
-    with self.assertWarnsRegex(FutureWarning, "Decorating classes is deprecated"):
+    with self.assertWarnsRegex(UserWarning, "Decorating classes is deprecated"):

         @torch.no_grad()
         class Foo:
@@ -5937,13 +5937,13 @@ Done""",
 b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)

 with self.assertWarnsRegex(
-    FutureWarning, "`get_numerical_jacobian` was part of PyTorch's private API"
+    UserWarning, "get_numerical_jacobian was part of PyTorch's private API"
 ):
     jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
 self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))

 with self.assertWarnsRegex(
-    FutureWarning, "`get_numerical_jacobian` was part of PyTorch's private API"
+    UserWarning, "get_numerical_jacobian was part of PyTorch's private API"
 ):
     jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
 self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
@@ -5963,7 +5963,7 @@ Done""",

 outputs = fn(a, b)
 with self.assertWarnsRegex(
-    FutureWarning, "`get_analytical_jacobian` was part of PyTorch's private API"
+    UserWarning, "get_analytical_jacobian was part of PyTorch's private API"
 ):
     (
         jacobians,
@@ -5991,7 +5991,7 @@ Done""",

 outputs = NonDetFunc.apply(a, 1e-6)
 with self.assertWarnsRegex(
-    FutureWarning, "`get_analytical_jacobian` was part of PyTorch's private API"
+    UserWarning, "get_analytical_jacobian was part of PyTorch's private API"
 ):
     (
         jacobians,
@@ -1820,10 +1820,10 @@ torch.cuda.synchronize()
 return grad, grad

 self.assertRegex(
-    str(w[0].message), r"`torch.cuda.amp.custom_fwd\(args...\)` is deprecated."
+    str(w[0].message), r"torch.cuda.amp.custom_fwd\(args...\) is deprecated."
 )
 self.assertRegex(
-    str(w[1].message), r"`torch.cuda.amp.custom_bwd\(args...\)` is deprecated."
+    str(w[1].message), r"torch.cuda.amp.custom_bwd\(args...\) is deprecated."
 )

 mymm = MyMM.apply
@@ -2016,8 +2016,8 @@ torch.cuda.synchronize()

 def test_cuda_autocast_deprecated_warning(self):
     with self.assertWarnsRegex(
-        FutureWarning,
-        r"`torch.cuda.amp.autocast\(args...\)` is deprecated. Please use `torch.amp.autocast\('cuda', args...\)` instead.",
+        DeprecationWarning,
+        r"torch.cuda.amp.autocast\(args...\) is deprecated. Please use torch.amp.autocast\('cuda', args...\) instead.",
     ):
         with torch.cuda.amp.autocast():
             _ = torch.ones(10)
@@ -287,7 +287,7 @@ class TestOptimRenewed(TestCase):
 inpt = torch.randn(5, device=device, dtype=dtype)

 # avoid endless recompiles by wrapping LR in a tensor if we're compiling
-lr = torch.tensor(0.01) if torch.compiler.is_compiling() else 0.01
+lr = torch.tensor(0.01) if torch._utils.is_compiling() else 0.01
 optimizer = optim_cls([{"params": [weight]}, {"params": [bias], "lr": lr}])
 schedulers = [scheduler_c(optimizer) for scheduler_c in schedulers_c]

@@ -338,7 +338,7 @@ $1: f32[2] = torch._ops.prims.sin.default($0)""")
 prims.mul(torch.randn(2), 1 + 1j)

 def test_check_deprecation_warning(self):
-    with self.assertWarnsRegex(FutureWarning, 'will be removed in the future'):
+    with self.assertWarnsRegex(DeprecationWarning, 'will be removed in the future'):
         torch._prims_common.check(True, lambda: 'message')

@@ -723,7 +723,7 @@ class TestPythonPytree(TestCase):
 self.y = y

 with self.assertWarnsRegex(
-    FutureWarning, "torch.utils._pytree._register_pytree_node"
+    UserWarning, "torch.utils._pytree._register_pytree_node"
 ):
     py_pytree._register_pytree_node(
         DummyType,
@@ -901,7 +901,7 @@ exit(len(w))
 m = torch.nn.Linear(1, 1)
 params = dict(m.named_parameters())
 x = torch.randn(3, 1)
-with self.assertWarnsRegex(FutureWarning, "Please use `torch.func.functional_call`"):
+with self.assertWarnsRegex(UserWarning, "Please use torch.func.functional_call"):
     stateless.functional_call(m, params, x)

 class TestPythonOptimizeMode(TestCase):
@@ -6198,8 +6198,8 @@ else:
 GradScaler = torch.cuda.amp.GradScaler if "cuda" == device.type else torch.cpu.amp.GradScaler

 with self.assertWarnsRegex(
-    FutureWarning,
-    rf"`torch.{device.type}.amp.GradScaler\(args...\)` is deprecated.",
+    UserWarning,
+    rf"torch.{device.type}.amp.GradScaler\(args...\) is deprecated.",
 ):
     _ = GradScaler(init_scale=2.0)

@@ -1996,6 +1996,17 @@ from torch import func as func
 from torch.func import vmap


+# The function _sparse_coo_tensor_unsafe is removed from PyTorch
+# Python API (v. 1.13), here we temporarily provide its replacement
+# with a deprecation warning.
+# TODO: remove the function for PyTorch v 1.15.
+def _sparse_coo_tensor_unsafe(*args, **kwargs):
+    import warnings
+    warnings.warn('torch._sparse_coo_tensor_unsafe is deprecated, '
+                  'use torch.sparse_coo_tensor(..., check_invariants=False) instead.')
+    kwargs['check_invariants'] = False
+    return torch.sparse_coo_tensor(*args, **kwargs)
+
 # Register MPS specific decomps
 torch.backends.mps._init()

@@ -7,6 +7,7 @@ from . import trace_rules, variables
 from .comptime import comptime
 from .eval_frame import DisableContext, innermost_fn, RunOnlyContext
 from .exc import IncorrectUsage
+from .external_utils import is_compiling

 if TYPE_CHECKING:
     from torch._C._dynamo.eval_frame import ( # noqa: F401
@@ -272,7 +273,7 @@ def mark_static(t, index=None):
 Unlike mark_dynamic, this can be done inside a graph, in which case it
 induces specialization on the tensor.
 """
-if torch.compiler.is_compiling():
+if is_compiling():
     if index is None:
         for s in t.size():
             comptime.force_static(s)
@@ -798,8 +798,7 @@ def explain(f, *extra_args, **extra_kwargs):
     warnings.warn(
         "explain(f, *args, **kwargs) is deprecated, use explain(f)(*args, **kwargs) instead. "
         "If you don't migrate, we may break your explain call in the future if your user defined kwargs "
-        "conflict with future kwargs added to explain(f).",
-        FutureWarning,
+        "conflict with future kwargs added to explain(f)."
     )
     return inner(*extra_args, **extra_kwargs)
 else:
@@ -942,7 +941,7 @@ def check_signature_rewritable(graph):
 tb = "".join(traceback.format_list(stack))
 extra = ""
 if len(user_stacks) > 1:
-    extra = f"(elided {len(user_stacks) - 1} more accesses)"
+    extra = f"(elided {len(user_stacks)-1} more accesses)"
 msg = f"{source.name()}, accessed at:\n{tb}{extra}"
 # TODO: option to print ALL of the stack traces at once
 input_errors.append(msg)
@@ -1477,8 +1476,7 @@ def export(
     warnings.warn(
         "export(f, *args, **kwargs) is deprecated, use export(f)(*args, **kwargs) instead. "
         "If you don't migrate, we may break your export call in the future if your user defined kwargs "
-        "conflict with future kwargs added to export(f).",
-        FutureWarning,
+        "conflict with future kwargs added to export(f)."
     )
     return inner(*extra_args, **extra_kwargs)
 else:
@@ -2,7 +2,6 @@

 import functools
 from typing import List
-from typing_extensions import deprecated

 import torch
 import torch.utils._pytree as pytree
@@ -13,10 +12,6 @@ except ModuleNotFoundError:
 np = None  # type: ignore[assignment]


-@deprecated(
-    "`is_compiling` is deprecated. Use `torch.compiler.is_compiling()` instead.",
-    category=FutureWarning,
-)
 def is_compiling() -> bool:
     """
     Indicates whether we are tracing/compiling with torch.compile() or torch.export().
@@ -188,7 +188,7 @@ def vmap(
 vmap does not provide general autobatching or handle variable-length
 sequences out of the box.
 """
-from torch.compiler import is_compiling
+from torch._dynamo import is_compiling

 _check_randomness_arg(randomness)
 if not (chunk_size is None or chunk_size > 0):
@@ -390,7 +390,7 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla
 """
 # To avoid cyclical dependency.
 import torch._functorch.eager_transforms as eager_transforms
-from torch.compiler import is_compiling
+from torch._dynamo import is_compiling

 def wrapper(*args, **kwargs):
     return eager_transforms.grad_impl(func, argnums, has_aux, args, kwargs)
@@ -432,8 +432,8 @@ def grad_and_value(

 See :func:`grad` for examples
 """
+from torch._dynamo import is_compiling
 from torch._functorch import eager_transforms
-from torch.compiler import is_compiling

 def wrapper(*args, **kwargs):
     return eager_transforms.grad_and_value_impl(
@@ -1,12 +1,3 @@
-"""
-The APIs in this file are exposed as `functorch.*`. They are thin wrappers
-around the torch.func.* APIs that have deprecation warnings -- we're trying
-to move people to the torch.func.* equivalents.
-
-NB: We don't use *args, **kwargs in the signatures because that changes the
-documentation.
-"""
-
 import textwrap
 import warnings
 from typing import Any, Callable, Optional, Tuple, Union
@@ -18,16 +9,25 @@ import torch.nn as nn
 from torch._functorch.eager_transforms import argnums_t
 from torch._functorch.vmap import in_dims_t, out_dims_t

+"""
+The APIs in this file are exposed as `functorch.*`. They are thin wrappers
+around the torch.func.* APIs that have deprecation warnings -- we're trying
+to move people to the torch.func.* equivalents.
+
+NB: We don't use *args, **kwargs in the signatures because that changes the
+documentation.
+"""
+

 def get_warning(api, new_api=None, replace_newlines=False):
     if new_api is None:
         new_api = f"torch.func.{api}"
     warning = (
         f"We've integrated functorch into PyTorch. As the final step of the \n"
-        f"integration, `functorch.{api}` is deprecated as of PyTorch \n"
+        f"integration, functorch.{api} is deprecated as of PyTorch \n"
         f"2.0 and will be deleted in a future version of PyTorch >= 2.3. \n"
-        f"Please use `{new_api}` instead; see the PyTorch 2.0 release notes \n"
-        f"and/or the `torch.func` migration guide for more details \n"
+        f"Please use {new_api} instead; see the PyTorch 2.0 release notes \n"
+        f"and/or the torch.func migration guide for more details \n"
         f"https://pytorch.org/docs/main/func.migrating.html"
     )
     if replace_newlines:
@@ -37,7 +37,7 @@ def get_warning(api, new_api=None, replace_newlines=False):

 def warn_deprecated(api, new_api=None):
     warning = get_warning(api, new_api, replace_newlines=True)
-    warnings.warn(warning, FutureWarning, stacklevel=2)
+    warnings.warn(warning, stacklevel=2)


 def setup_docs(functorch_api, torch_func_api=None, new_api_name=None):
@@ -765,7 +765,7 @@ def jacrev(
 # Dynamo does not support HOP composition if their inner function is
 # annotated with @functools.wraps(...). We circumvent this issue by applying
 # wraps only if we're not tracing with dynamo.
-if not torch.compiler.is_compiling():
+if not torch._dynamo.is_compiling():
     wrapper_fn = wraps(func)(wrapper_fn)

 return wrapper_fn
@@ -1346,7 +1346,7 @@ def jacfwd(
 # Dynamo does not support HOP composition if their inner function is
 # annotated with @functools.wraps(...). We circumvent this issue by applying
 # wraps only if we're not tracing with dynamo.
-if not torch.compiler.is_compiling():
+if not torch._dynamo.is_compiling():
     wrapper_fn = wraps(func)(wrapper_fn)

 return wrapper_fn
@@ -16,8 +16,7 @@ __all__ = ["tree_map_", "treespec_pprint"]
 with warnings.catch_warnings():
     warnings.simplefilter("always")
     warnings.warn(
-        "`torch._functorch.pytree_hacks` is deprecated and will be removed in a future release. "
-        "Please `use torch.utils._pytree` instead.",
+        "torch._functorch.pytree_hacks is deprecated and will be removed in a future release. "
+        "Please use torch.utils._pytree instead.",
         DeprecationWarning,
         stacklevel=2,
     )
@@ -76,7 +76,7 @@ def associative_scan(
 assert callable(combine_fn), "combine_fn must be a callable, but got {combine_fn}"
 assert isinstance(dim, int), "dim must be an int, but got {type(dim)}"

-if not torch.compiler.is_compiling():
+if not torch._dynamo.is_compiling():
     with _set_compilation_env(), torch._dynamo.utils.disable_cache_limit():
         return torch.compile(associative_scan, fullgraph=True)(
             combine_fn, input, dim
@@ -2750,8 +2750,7 @@ class FixedLayout(Layout):
 """A closure containing math to read a given element"""

 def indexer(index):
-    assert len(index) == len(self.stride)
-    assert len(index) == len(self.size)
+    assert len(index) == len(self.stride) == len(self.size)
     result = self.offset
     for idx, stride, sz in zip(index, self.stride, self.size):
         if sz != 1:
@@ -1,7 +1,7 @@
 import contextlib
 import functools
+import warnings
 from typing import Callable, Optional
-from typing_extensions import deprecated

 import torch
 from torch._library.utils import Kernel, RegistrationHandle
@@ -124,11 +124,10 @@ class AbstractImplCtx:
 self._shape_env = _fake_mode.shape_env
 self._op = _op

-@deprecated(
-    "`create_unbacked_symint` is deprecated, please use `new_dynamic_size` instead",
-    category=FutureWarning,
-)
 def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
+    warnings.warn(
+        "create_unbacked_symint is deprecated, please use new_dynamic_size instead"
+    )
     return self.new_dynamic_size(min=min, max=max)

 def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
@@ -21,7 +21,7 @@ from typing import (
     TYPE_CHECKING,
     Union,
 )
-from typing_extensions import deprecated, TypeAlias
+from typing_extensions import TypeAlias


 if TYPE_CHECKING:
@@ -1789,11 +1789,6 @@ def check_in_bounds_for_storage(
 # NOTE: This function should ideally be removed, but some Meta internal models
 # packaged with `torch.package` are using it, so it will have to be removed
 # at some point in the future when those models no longer use this function.
-@deprecated(
-    "`torch._prims_common.check` is deprecated and will be removed in the future. "
-    "Please use `torch._check*` functions instead.",
-    category=FutureWarning,
-)
 def check(
     b: bool, s: Callable[[], str], exc_type: Type[Exception] = RuntimeError
 ) -> None:
@@ -1806,6 +1801,12 @@ def check(
 .. note:: This function is planned for removal in the future. Please use
     `torch._check*` functions instead.
 """
+warnings.warn(
+    DeprecationWarning(
+        "'torch._prims_common.check' will be removed in the future. Please use "
+        "'torch._check*' functions instead"
+    )
+)
 torch._check_with(exc_type, b, s)

@@ -7,7 +7,7 @@ import traceback
 import warnings
 from collections import defaultdict
 from typing import Any, Callable, DefaultDict, Generic, List, Optional
-from typing_extensions import deprecated, ParamSpec
+from typing_extensions import ParamSpec

 import torch

@@ -852,10 +852,6 @@ def classproperty(func):
 return _ClassPropertyDescriptor(func)


-@deprecated(
-    "`is_compiling` is deprecated. Use `torch.compiler.is_compiling()` instead.",
-    category=FutureWarning,
-)
 def is_compiling() -> bool:
     """
     Indicates whether we are tracing/compiling with torch.compile() or torch.export().
@@ -1,6 +1,6 @@
 import functools
+import warnings
 from typing import Any, Callable, List, Optional, Tuple, Union
-from typing_extensions import deprecated

 import torch
 from torch import Tensor
@@ -190,14 +190,14 @@ def _get_name(func: Callable):
 # vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors,
 # sends those into func, and then unwraps the output BatchedTensors. Operations
 # on BatchedTensors perform the batched operations that the user is asking for.
-@deprecated(
-    "Please use `torch.vmap` instead of `torch._vmap_internals.vmap`.",
-    category=FutureWarning,
-)
 def vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0) -> Callable:
     """
     Please use torch.vmap instead of this API.
     """
+    warnings.warn(
+        "Please use torch.vmap instead of torch._vmap_internals.vmap. ",
+        stacklevel=2,
+    )
     return _vmap(func, in_dims, out_dims)


@@ -224,6 +224,7 @@ class MultiheadAttention(nn.MultiheadAttention):

 return fp

+
 @classmethod
 def from_observed(cls, other):
     # The whole flow is float -> observed -> quantized
@@ -335,10 +336,7 @@ class MultiheadAttention(nn.MultiheadAttention):

 if attn_mask is not None:
     if attn_mask.dtype == torch.uint8:
-        warnings.warn(
-            "Byte tensor for `attn_mask` in `nn.MultiheadAttention` is deprecated. "
-            "Use bool tensor instead.",
-        )
+        warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
         attn_mask = attn_mask.to(torch.bool)
     assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \
         f'Only float and bool types are supported for attn_mask, not {attn_mask.dtype}'
@@ -356,10 +354,7 @@ class MultiheadAttention(nn.MultiheadAttention):

 # convert ByteTensor key_padding_mask to bool
 if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
-    warnings.warn(
-        "Byte tensor for `key_padding_mask` in `nn.MultiheadAttention` is deprecated. "
-        "Use bool tensor instead.",
-    )
+    warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
     key_padding_mask = key_padding_mask.to(torch.bool)
 if self.bias_k is not None and self.bias_v is not None:
     if static_k is None and static_v is None:
@@ -1,6 +1,5 @@
 import numbers
 import warnings
-from typing_extensions import deprecated

 import torch
 import torch.nn as nn
@@ -17,11 +16,8 @@ def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Ten
 return tensor.index_select(dim, permutation)


-@deprecated(
-    "`apply_permutation` is deprecated, please use `tensor.index_select(dim, permutation)` instead",
-    category=FutureWarning,
-)
 def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
+    warnings.warn("apply_permutation is deprecated, please use tensor.index_select(dim, permutation) instead")
     return _apply_permutation(tensor, permutation, dim)

@@ -949,30 +949,24 @@ def convert(
 if convert_custom_config is None:
     convert_custom_config = ConvertCustomConfig()

-if isinstance(convert_custom_config, dict):
+if isinstance(convert_custom_config, Dict):
     warnings.warn(
         "Passing a convert_custom_config_dict to convert is deprecated and will not be supported "
-        "in a future version. Please pass in a ConvertCustomConfig instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a ConvertCustomConfig instead.")
     convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)

-if isinstance(qconfig_mapping, dict):
+if isinstance(qconfig_mapping, Dict):
     warnings.warn(
         "Passing a QConfig dictionary to convert is deprecated and will not be supported "
-        "in a future version. Please pass in a QConfigMapping instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a QConfigMapping instead.")
     qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping) if qconfig_mapping else None
 qconfig_mapping = copy.deepcopy(qconfig_mapping)
 assert qconfig_mapping is None or isinstance(qconfig_mapping, QConfigMapping)

-if isinstance(backend_config, dict):
+if isinstance(backend_config, Dict):
     warnings.warn(
         "Passing a backend_config_dict to prepare is deprecated and will not be supported "
-        "in a future version. Please pass in a BackendConfig instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a BackendConfig instead.")
     backend_config = BackendConfig.from_dict(backend_config)

 if backend_config is None:
@@ -52,20 +52,16 @@ def fuse(
 if fuse_custom_config is None:
     fuse_custom_config = FuseCustomConfig()

-if isinstance(fuse_custom_config, dict):
+if isinstance(fuse_custom_config, Dict):
     warnings.warn(
         "Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported "
-        "in a future version. Please pass in a FuseCustomConfig instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a FuseCustomConfig instead.")
     fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)

-if isinstance(backend_config, dict):
+if isinstance(backend_config, Dict):
     warnings.warn(
         "Passing a backend_config_dict to prepare is deprecated and will not be supported "
-        "in a future version. Please pass in a BackendConfig instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a BackendConfig instead.")
     backend_config = BackendConfig.from_dict(backend_config)

 named_modules = dict(model.named_modules())
@@ -1749,36 +1749,28 @@ def prepare(
 if _equalization_config is None:
     _equalization_config = QConfigMapping()

-if isinstance(qconfig_mapping, dict):
+if isinstance(qconfig_mapping, Dict):
     warnings.warn(
         "Passing a QConfig dictionary to prepare is deprecated and will not be supported "
-        "in a future version. Please pass in a QConfigMapping instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a QConfigMapping instead.")
     qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping)

-if isinstance(_equalization_config, dict):
+if isinstance(_equalization_config, Dict):
     warnings.warn(
         "Passing a QConfig dictionary to prepare for equalization is deprecated and will not "
-        "be supported in a future version. Please pass in a QConfigMapping instead.",
-        FutureWarning,
-    )
+        "be supported in a future version. Please pass in a QConfigMapping instead.")
     _equalization_config = QConfigMapping.from_dict(_equalization_config)

-if isinstance(prepare_custom_config, dict):
+if isinstance(prepare_custom_config, Dict):
     warnings.warn(
         "Passing a prepare_custom_config_dict to prepare is deprecated and will not be supported "
-        "in a future version. Please pass in a PrepareCustomConfig instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a PrepareCustomConfig instead.")
     prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config)

-if isinstance(backend_config, dict):
+if isinstance(backend_config, Dict):
     warnings.warn(
         "Passing a backend_config_dict to prepare is deprecated and will not be supported "
-        "in a future version. Please pass in a BackendConfig instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a BackendConfig instead.")
     backend_config = BackendConfig.from_dict(backend_config)

 assert isinstance(qconfig_mapping, QConfigMapping)
@@ -1,6 +1,5 @@
 from collections import namedtuple
 from typing import Optional, Any, Union, Type
-from typing_extensions import deprecated

 import torch
 import torch.nn as nn
@@ -107,10 +106,6 @@ class QConfig(namedtuple('QConfig', ['activation', 'weight'])):
 return super().__new__(cls, activation, weight)


-@deprecated(
-    "`QConfigDynamic` is going to be deprecated in PyTorch 1.12, please use `QConfig` instead",
-    category=FutureWarning,
-)
 class QConfigDynamic(namedtuple('QConfigDynamic', ['activation', 'weight'])):
     """
     Describes how to dynamically quantize a layer or a part of the network by providing
@@ -132,6 +127,7 @@ class QConfigDynamic(namedtuple('QConfigDynamic', ['activation', 'weight'])):
 if isinstance(weight, nn.Module):
     raise ValueError("QConfigDynamic received observer instance, please pass observer class instead. " +
                      "Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
+warnings.warn("QConfigDynamic is going to be deprecated in PyTorch 1.12, please use QConfig instead")
 return super().__new__(cls, activation, weight)

@@ -426,20 +422,16 @@ _default_quint8_placeholder_qconfig = QConfig(
     weight=None,
 )

-@deprecated(
-    "`torch.ao.quantization.get_default_qconfig_dict` is deprecated and will be removed in "
-    "a future version. Please use `torch.ao.quantization.get_default_qconfig_mapping` instead.",
-    category=FutureWarning,
-)
 def get_default_qconfig_dict(backend='x86', version=0):
+    warnings.warn(
+        "torch.ao.quantization.get_default_qconfig_dict is deprecated and will be removed in "
+        "a future version. Please use torch.ao.quantization.get_default_qconfig_mapping instead.")
     return torch.ao.quantization.get_default_qconfig_mapping(backend, version).to_dict()

-@deprecated(
-    "`torch.ao.quantization.get_default_qat_qconfig_dict` is deprecated and will be removed in "
-    "a future version. Please use `torch.ao.quantization.get_default_qat_qconfig_mapping` instead.",
-    category=FutureWarning,
-)
 def get_default_qat_qconfig_dict(backend='x86', version=1):
+    warnings.warn(
+        "torch.ao.quantization.get_default_qat_qconfig_dict is deprecated and will be removed in "
+        "a future version. Please use torch.ao.quantization.get_default_qat_qconfig_mapping instead.")
     return torch.ao.quantization.get_default_qat_qconfig_mapping(backend, version).to_dict()

 def _assert_valid_qconfig(qconfig: Optional[QConfig],
@@ -117,12 +117,10 @@ forward graph of the parent module,
 if _equalization_config is None:
     _equalization_config = QConfigMapping()

-if isinstance(prepare_custom_config, dict):
+if isinstance(prepare_custom_config, Dict):
     warnings.warn(
         "Passing a prepare_custom_config_dict to prepare is deprecated and will not be supported "
-        "in a future version. Please pass in a PrepareCustomConfig instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a PrepareCustomConfig instead.")
     prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config)

 # swap FloatFunctional with FXFloatFunctional
@@ -224,12 +222,10 @@ def fuse_fx(
 if fuse_custom_config is None:
     fuse_custom_config = FuseCustomConfig()

-if isinstance(fuse_custom_config, dict):
+if isinstance(fuse_custom_config, Dict):
     warnings.warn(
         "Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported "
-        "in a future version. Please pass in a FuseCustomConfig instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a FuseCustomConfig instead.")
     fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)

 torch._C._log_api_usage_once("quantization_api.quantize_fx.fuse_fx")
@@ -515,12 +511,10 @@ def _convert_fx(
 if convert_custom_config is None:
     convert_custom_config = ConvertCustomConfig()

-if isinstance(convert_custom_config, dict):
+if isinstance(convert_custom_config, Dict):
     warnings.warn(
         "Passing a convert_custom_config_dict to convert is deprecated and will not be supported "
-        "in a future version. Please pass in a ConvertCustomConfig instead.",
-        FutureWarning,
-    )
+        "in a future version. Please pass in a ConvertCustomConfig instead.")
     convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)

 _check_is_graph_module(graph_module)
@@ -252,20 +252,17 @@ def backward(
 )

 if grad_variables is not None:
-    warnings.warn(
-        "`grad_variables` is deprecated. Use `grad_tensors` instead.",
-        FutureWarning,
-    )
+    warnings.warn("'grad_variables' is deprecated. Use 'grad_tensors' instead.")
     if grad_tensors is None:
         grad_tensors = grad_variables
     else:
         raise RuntimeError(
-            "`grad_tensors` and `grad_variables` (deprecated) "
-            "arguments both passed to `backward()`. Please only "
-            "use `grad_tensors`."
+            "'grad_tensors' and 'grad_variables' (deprecated) "
+            "arguments both passed to backward(). Please only "
+            "use 'grad_tensors'."
         )
 if inputs is not None and len(inputs) == 0:
-    raise RuntimeError("`inputs` argument to `backward()` cannot be empty.")
+    raise RuntimeError("'inputs' argument to backward() cannot be empty.")

 tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tuple(tensors)
 inputs = (
@@ -398,8 +395,7 @@ def grad(
 warnings.warn(
     "only_inputs argument is deprecated and is ignored now "
     "(defaults to True). To accumulate gradient for other "
-    "parts of the graph, please use torch.autograd.backward.",
-    FutureWarning,
+    "parts of the graph, please use torch.autograd.backward."
 )

 grad_outputs_ = _tensor_or_tensors_to_tuple(grad_outputs, len(t_outputs))
@@ -1,6 +1,6 @@
 import operator
+import warnings
 from functools import reduce
-from typing_extensions import deprecated

 import torch
 import torch._utils
@@ -9,12 +9,11 @@ from ..function import Function

 class Type(Function):
     @staticmethod
-    @deprecated(
-        "`torch.autograd._functions.Type` is deprecated as of PyTorch 2.1, "
-        "please use `torch.tensor.to(dtype=dtype)` instead.",
-        category=FutureWarning,
-    )
     def forward(ctx, i, dest_type):
+        warnings.warn(
+            "torch.autograd._functions.Type is deprecated as of PyTorch 2.1, please use "
+            "torch.tensor.to(dtype=dtype) instead."
+        )
         ctx.input_type = type(i)
         ctx.input_device = -1 if not i.is_cuda else i.get_device()
         return i.type(dest_type)
@@ -4,7 +4,6 @@ import itertools
 import warnings
 from collections import OrderedDict
 from typing import Any, List, Optional, Tuple
-from typing_extensions import deprecated

 import torch
 import torch._C as _C
@@ -180,14 +179,12 @@ class FunctionCtx:
 """
 self.dirty_tensors = args

-@deprecated(
-    "`mark_shared_storage` is deprecated. "
-    "Tensors with shared storages are automatically tracked. "
-    "Note that calls to `set_()` are not tracked",
-    category=FutureWarning,
-)
 def mark_shared_storage(self, *pairs):
-    pass
+    warnings.warn(
+        "mark_shared_storage is deprecated. "
+        "Tensors with shared storages are automatically tracked. Note "
+        "that calls to `set_()` are not tracked"
+    )

 def mark_non_differentiable(self, *args: torch.Tensor):
     r"""Mark outputs as non-differentiable.
@@ -494,8 +491,9 @@ class Function(_SingleLevelFunction):
 """

 def __init__(self, *args, **kwargs):
+    cls = self.__class__
     warnings.warn(
-        f"{self.__class__} should not be instantiated. Methods on autograd functions"
+        f"{cls} should not be instantiated. Methods on autograd functions"
         "are all static, so you should invoke them on the class itself. "
         "Instantiating an autograd function will raise an "
         "error in a future version of PyTorch.",
@@ -3,7 +3,6 @@ import functools
 import warnings
 from itertools import product
 from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
-from typing_extensions import deprecated

 import torch
 import torch.testing
@@ -307,14 +306,6 @@ def _get_numerical_jacobian(
 return jacobians


-@deprecated(
-    "`get_numerical_jacobian` was part of PyTorch's private API and not "
-    "meant to be exposed. We are deprecating it and it will be removed "
-    "in a future version of PyTorch. If you have a specific use for "
-    "this or feature request for this to be a stable API, please file "
-    "us an issue at https://github.com/pytorch/pytorch/issues/new",
-    category=FutureWarning,
-)
 def get_numerical_jacobian(fn, inputs, target=None, eps=1e-3, grad_out=1.0):
     """Compute the numerical Jacobian for a given fn and its inputs.

@@ -334,6 +325,13 @@ def get_numerical_jacobian(fn, inputs, target=None, eps=1e-3, grad_out=1.0):
 Note that `target` may not even be part of `input` to `fn`, so please be
 **very careful** in this to not clone `target`.
 """
+warnings.warn(
+    "get_numerical_jacobian was part of PyTorch's private API and not "
+    "meant to be exposed. We are deprecating it and it will be removed "
+    "in a future version of PyTorch. If you have a specific use for "
+    "this or feature request for this to be a stable API, please file "
+    "us an issue at https://github.com/pytorch/pytorch/issues/new"
+)
 if (
     grad_out != 1.0
 ): # grad_out param is only kept for backward compatibility reasons
@@ -820,17 +818,16 @@ def _get_analytical_vJu_backward_mode(
 return reduced_jacobians


-@deprecated(
-    "`get_analytical_jacobian` was part of PyTorch's private API and not "
-    "meant to be exposed. We are deprecating it and it will be removed "
-    "in a future version of PyTorch. If you have a specific use for "
-    "this or feature request for this to be a stable API, please file "
-    "us an issue at https://github.com/pytorch/pytorch/issues/new",
-    category=FutureWarning,
-)
 def get_analytical_jacobian(inputs, output, nondet_tol=0.0, grad_out=1.0):
     # Replicates the behavior of the old get_analytical_jacobian before the refactor
     # This shares much of its code with _check_analytical_jacobian_attributes
+    warnings.warn(
+        "get_analytical_jacobian was part of PyTorch's private API and not "
+        "meant to be exposed. We are deprecating it and it will be removed "
+        "in a future version of PyTorch. If you have a specific use for "
+        "this or feature request for this to be a stable API, please file "
+        "us an issue at https://github.com/pytorch/pytorch/issues/new"
+    )
     if (
         grad_out != 1.0
     ): # grad_out param is only kept for backward compatibility reasons
@@ -213,9 +213,7 @@ class profile:
 self.use_cuda = use_cuda
 if self.use_cuda:
     warn(
-        "The attribute `use_cuda` will be deprecated soon, "
-        "please use ``use_device = 'cuda'`` instead.",
-        FutureWarning,
+        "The attribute `use_cuda` will be deprecated soon, please use ``use_device = 'cuda'`` instead."
     )
     self.use_device: Optional[str] = "cuda"
 else:
@@ -1,6 +1,5 @@
 import itertools
-import warnings
-from typing_extensions import deprecated
+from warnings import warn

 import torch
 import torch.cuda
@@ -24,11 +23,6 @@ from torch.autograd.profiler_util import (
 __all__ = ["profile"]


-@deprecated(
-    "`torch.autograd.profiler_legacy.profile` is deprecated and will be removed in a future release. "
-    "Please use `torch.profiler` instead.",
-    category=None, # TODO: change to `FutureWarning`
-)
 class profile:
     """DEPRECATED: use torch.profiler instead."""

@@ -57,7 +51,7 @@ class profile:
 self.with_modules = with_modules

 if self.use_cuda and not torch.cuda.is_available():
-    warnings.warn("CUDA is not available, disabling CUDA profiling")
+    warn("CUDA is not available, disabling CUDA profiling")
     self.use_cuda = False

 if self.use_cuda:
@@ -6,7 +6,6 @@ from collections import defaultdict, namedtuple
 from operator import attrgetter

 from typing import Any, Dict, List, Optional, Tuple
-from typing_extensions import deprecated

 import torch
 from torch.autograd import DeviceType
@@ -416,10 +415,6 @@ class FormattedTimesMixin:
 return 0.0 if self.count == 0 else 1.0 * self.device_time_total / self.count # type: ignore[attr-defined]

 @property
-@deprecated(
-    "`cuda_time` is deprecated, please use `device_time` instead.",
-    category=FutureWarning,
-)
 def cuda_time(self): # To be deprecated
     return self.device_time

@@ -543,12 +538,8 @@ class FunctionEvent(FormattedTimesMixin):
 )

 @property
-@deprecated(
-    "`self_cuda_memory_usage` is deprecated. Use `self_device_memory_usage` instead.",
-    category=FutureWarning,
-)
 def self_cuda_memory_usage(self): # To be deprecated
-    return self.self_device_memory_usage
+    self.self_device_memory_usage

 @property
 def cpu_time_total(self):
@@ -583,12 +574,8 @@ class FunctionEvent(FormattedTimesMixin):
 return self.time_range.elapsed_us()

 @property
-@deprecated(
-    "`cuda_time_total` is deprecated. Use `device_time_total` instead.",
-    category=FutureWarning,
-)
 def cuda_time_total(self): # To be deprecated
-    return self.device_time_total
+    self.device_time_total

 @property
 def self_device_time_total(self):
@@ -603,12 +590,8 @@ class FunctionEvent(FormattedTimesMixin):
 return self.device_time_total

 @property
-@deprecated(
-    "`self_cuda_time_total` is deprecated. Use `self_device_time_total` instead.",
-    category=FutureWarning,
-)
 def self_cuda_time_total(self): # To be deprecated
-    return self.self_device_time_total
+    self.self_device_time_total

 @property
 def key(self):
@@ -1,7 +1,7 @@
 import contextlib
+import warnings

 from typing import Union
-from typing_extensions import deprecated

 import torch

@@ -377,15 +377,6 @@ def enable_cudnn_sdp(enabled: bool):


 @contextlib.contextmanager
-@deprecated(
-    (
-        "`torch.backends.cuda.sdp_kernel()` is deprecated. "
-        "In the future, this context manager will be removed. "
-        "Please see `torch.nn.attention.sdpa_kernel()` for the new context manager, "
-        "with updated signature."
-    ),
-    category=FutureWarning,
-)
 def sdp_kernel(
     enable_flash: bool = True,
     enable_math: bool = True,
@@ -398,6 +389,15 @@ def sdp_kernel(
 This context manager can be used to temporarily enable or disable any of the three backends for scaled dot product attention.
 Upon exiting the context manager, the previous state of the flags will be restored.
 """
+warnings.warn(
+    (
+        "torch.backends.cuda.sdp_kernel() "
+        "is deprecated. In the future, this context manager will be removed. "
+        "Please see, torch.nn.attention.sdpa_kernel() for the new context manager, with updated "
+        "signature."
+    ),
+    FutureWarning,
+)
 from torch.nn.attention import sdpa_kernel

 backend_list = []
@@ -1,5 +1,5 @@
+import warnings
 from typing import Any
-from typing_extensions import deprecated

 import torch

@@ -12,11 +12,6 @@ class autocast(torch.amp.autocast_mode.autocast):
 ``torch.cpu.amp.autocast(args...)`` is deprecated. Please use ``torch.amp.autocast("cpu", args...)`` instead.
 """

-@deprecated(
-    "`torch.cpu.amp.autocast(args...)` is deprecated. "
-    "Please use `torch.amp.autocast('cpu', args...)` instead.",
-    category=FutureWarning,
-)
 def __init__(
     self,
     enabled: bool = True,
@@ -28,6 +23,10 @@ class autocast(torch.amp.autocast_mode.autocast):
     self.device = "cpu"
     self.fast_dtype = dtype
     return
+warnings.warn(
+    "torch.cpu.amp.autocast(args...) is deprecated. Please use torch.amp.autocast('cpu', args...) instead.",
+    DeprecationWarning,
+)
 super().__init__(
     "cpu", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
 )
@@ -1,4 +1,4 @@
-from typing_extensions import deprecated
+import warnings

 import torch

@@ -11,11 +11,6 @@ class GradScaler(torch.amp.GradScaler):
 ``torch.cpu.amp.GradScaler(args...)`` is deprecated. Please use ``torch.amp.GradScaler("cpu", args...)`` instead.
 """

-@deprecated(
-    "`torch.cpu.amp.GradScaler(args...)` is deprecated. "
-    "Please use `torch.amp.GradScaler('cpu', args...)` instead.",
-    category=FutureWarning,
-)
 def __init__(
     self,
     init_scale: float = 2.0**16,
@@ -24,6 +19,9 @@ class GradScaler(torch.amp.GradScaler):
 growth_interval: int = 2000,
 enabled: bool = True,
 ) -> None:
+    warnings.warn(
+        "torch.cpu.amp.GradScaler(args...) is deprecated. Please use torch.amp.GradScaler('cpu', args...) instead."
+    )
     super().__init__(
         "cpu",
         init_scale=init_scale,
@@ -145,8 +145,8 @@ def compare(before, after, format_flamegraph=format_flamegraph):
 before_segs = {_seg_key(seg) for seg in before}
 after_segs = {_seg_key(seg) for seg in after}

-print(f'only_before = {[a for a, _ in (before_segs - after_segs)]}')
-print(f'only_after = {[a for a, _ in (after_segs - before_segs)]}')
+print(f'only_before = {[a for a,_ in (before_segs - after_segs)]}')
+print(f'only_after = {[a for a,_ in (after_segs - before_segs)]}')

 for seg in before:
     if _seg_key(seg) not in after_segs:
@@ -382,7 +382,7 @@ add_local_files(local_files, $VIZ_KIND)

 def _format_viz(data, viz_kind, device):
     if device is not None:
-        warnings.warn('device argument is deprecated, plots now contain all device', FutureWarning)
+        warnings.warn('device argument is deprecated, plots now contain all device')
     buffer = pickle.dumps(data)
     buffer += b'\x00' * (3 - len(buffer) % 3)
     # Encode the buffer with base64
@@ -1,6 +1,6 @@
 import functools
+import warnings
 from typing import Any
-from typing_extensions import deprecated

 import torch

@@ -13,11 +13,6 @@ class autocast(torch.amp.autocast_mode.autocast):
 ``torch.cuda.amp.autocast(args...)`` is deprecated. Please use ``torch.amp.autocast("cuda", args...)`` instead.
 """

-@deprecated(
-    "`torch.cuda.amp.autocast(args...)` is deprecated. "
-    "Please use `torch.amp.autocast('cuda', args...)` instead.",
-    category=FutureWarning,
-)
 def __init__(
     self,
     enabled: bool = True,
@@ -29,6 +24,10 @@ class autocast(torch.amp.autocast_mode.autocast):
     self.device = "cuda"
     self.fast_dtype = dtype
     return
+warnings.warn(
+    "torch.cuda.amp.autocast(args...) is deprecated. Please use torch.amp.autocast('cuda', args...) instead.",
+    DeprecationWarning,
+)
 super().__init__(
     "cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
 )
@@ -50,29 +49,25 @@ class autocast(torch.amp.autocast_mode.autocast):
 return super().__call__(func)


-@deprecated(
-    "`torch.cuda.amp.custom_fwd(args...)` is deprecated. "
-    "Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.",
-    category=FutureWarning,
-)
 def custom_fwd(fwd=None, *, cast_inputs=None):
     """
     ``torch.cuda.amp.custom_fwd(args...)`` is deprecated. Please use
     ``torch.amp.custom_fwd(args..., device_type='cuda')`` instead.
     """
+    warnings.warn(
+        "torch.cuda.amp.custom_fwd(args...) is deprecated. Please use torch.amp.custom_fwd(args..., device_type='cuda') instead."
+    )
     return functools.partial(torch.amp.custom_fwd, device_type="cuda")(
         fwd=fwd, cast_inputs=cast_inputs
     )


-@deprecated(
-    "`torch.cuda.amp.custom_bwd(args...)` is deprecated. "
-    "Please use `torch.amp.custom_bwd(args..., device_type='cuda')` instead.",
-    category=FutureWarning,
-)
 def custom_bwd(bwd):
     """
     ``torch.cuda.amp.custom_bwd(args...)`` is deprecated. Please use
     ``torch.amp.custom_bwd(args..., device_type='cuda')`` instead.
     """
+    warnings.warn(
+        "torch.cuda.amp.custom_bwd(args...) is deprecated. Please use torch.amp.custom_bwd(args..., device_type='cuda') instead."
+    )
     return functools.partial(torch.amp.custom_bwd, device_type="cuda")(bwd)
@@ -1,4 +1,4 @@
-from typing_extensions import deprecated
+import warnings

 import torch

@@ -11,11 +11,6 @@ class GradScaler(torch.amp.GradScaler):
 ``torch.cuda.amp.GradScaler(args...)`` is deprecated. Please use ``torch.amp.GradScaler("cuda", args...)`` instead.
 """

-@deprecated(
-    "`torch.cuda.amp.GradScaler(args...)` is deprecated. "
-    "Please use `torch.amp.GradScaler('cuda', args...)` instead.",
-    category=FutureWarning,
-)
 def __init__(
     self,
     init_scale: float = 2.0**16,
@@ -24,6 +19,9 @@ class GradScaler(torch.amp.GradScaler):
 growth_interval: int = 2000,
 enabled: bool = True,
 ) -> None:
+    warnings.warn(
+        "torch.cuda.amp.GradScaler(args...) is deprecated. Please use torch.amp.GradScaler('cuda', args...) instead."
+    )
     super().__init__(
         "cuda",
         init_scale=init_scale,
@@ -9,7 +9,6 @@ import warnings
 from inspect import signature

 from typing import Any, Dict, Optional, Tuple, Union
-from typing_extensions import deprecated

 import torch
 from torch import _C
@@ -447,21 +446,21 @@ def max_memory_reserved(device: Union[Device, int] = None) -> int:
 return memory_stats(device=device).get("reserved_bytes.all.peak", 0)


-@deprecated(
-    "`torch.cuda.memory_cached` has been renamed to `torch.cuda.memory_reserved`",
-    category=FutureWarning,
-)
 def memory_cached(device: Union[Device, int] = None) -> int:
     r"""Deprecated; see :func:`~torch.cuda.memory_reserved`."""
+    warnings.warn(
+        "torch.cuda.memory_cached has been renamed to torch.cuda.memory_reserved",
+        FutureWarning,
+    )
     return memory_reserved(device=device)


-@deprecated(
-    "`torch.cuda.max_memory_cached` has been renamed to `torch.cuda.max_memory_reserved`",
-    category=FutureWarning,
-)
 def max_memory_cached(device: Union[Device, int] = None) -> int:
     r"""Deprecated; see :func:`~torch.cuda.max_memory_reserved`."""
+    warnings.warn(
+        "torch.cuda.max_memory_cached has been renamed to torch.cuda.max_memory_reserved",
+        FutureWarning,
+    )
     return max_memory_reserved(device=device)

@@ -89,9 +89,8 @@ def reduce(
 )
 else:
     warnings.warn(
-        "`nccl.reduce` with an output tensor list is deprecated. "
-        "Please specify a single output tensor with argument 'output' instead instead.",
-        FutureWarning,
+        "nccl.reduce with an output tensor list is deprecated. "
+        "Please specify a single output tensor with argument 'output' instead instead."
     )
     _output = outputs[root]
 elif not isinstance(output, torch.Tensor) and isinstance(
@@ -100,8 +99,7 @@ def reduce(
 # User called old API with positional arguments of list of output tensors.
 warnings.warn(
     "nccl.reduce with an output tensor list is deprecated. "
-    "Please specify a single output tensor.",
-    FutureWarning,
+    "Please specify a single output tensor."
 )
 _output = output[root]
 else:
@@ -1,5 +1,5 @@
+import warnings
 from typing import Callable, Iterable, Optional, Union
-from typing_extensions import deprecated

 import torch
 import torch.distributed as dist
@@ -38,13 +38,6 @@ from torch.distributed.fsdp.wrap import _Policy


 @contract(state_cls=_FSDPState)
-@deprecated(
-    "`torch.distributed._composable.fully_shard` is being deprecated. "
-    "You can continue to use the wrapper based FSDP. "
-    "See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py. "
-    "`torch.distributed._composable.fully_shard` will be removed after PyTorch 2.5.",
-    category=FutureWarning,
-)
 def fully_shard(
     module: nn.Module,
     *,
@@ -62,7 +55,16 @@ def fully_shard(
 Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]
 ] = None,
 ) -> nn.Module:
-    """Applies ``FullyShardedDataParallel`` (FSDP) semantics to ``module``."""
+    """
+    Applies ``FullyShardedDataParallel` (FSDP) semantics to ``module``.
+    """
+    warnings.warn(
+        "``torch.distributed._composable.fully_shard`` is being deprecated."
+        "You can contintue to use the wrapper based FSDP."
+        "See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py."
+        "``torch.distributed._composable.fully_shard`` will be removed after PyTorch 2.5."
+    )
+
     torch._C._log_api_usage_once("torch.distributed.fully_shard")
     # Enforce the new auto wrap policy
     if policy is not None and not isinstance(policy, _Policy):
|
|
@@ -766,8 +766,7 @@ def _resolve_group_name(group: RANK_TYPES, tag: str = "") -> str:
warnings.warn(
"The combination of ranks + tag as process group "
"identifier has been deprecated. Please switch to "
"using ProcessGroup, DeviceMesh, or group name instead.",
FutureWarning,
"using ProcessGroup, DeviceMesh, or group name instead."
)
return c10d._resolve_group_name_by_ranks_and_tag(cast(List[int], group), tag)
else:

@@ -5,15 +5,8 @@ import torch
import warnings

from torch.distributed.checkpoint import * # noqa: F403


with warnings.catch_warnings():
warnings.simplefilter("always")
warnings.warn(
"`torch.distributed._shard.checkpoint` will be deprecated, "
"use `torch.distributed.checkpoint` instead",
DeprecationWarning,
stacklevel=2,
)

warnings.warn(
"torch.distributed._shard.checkpoint will be deprecated, use torch.distributed.checkpoint instead",
DeprecationWarning
)
sys.modules['torch.distributed._shard.checkpoint'] = torch.distributed.checkpoint

@ -10,7 +10,6 @@ from typing import (
|
|||
cast,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
from typing_extensions import deprecated
|
||||
import copy
|
||||
import warnings
|
||||
from functools import reduce
|
||||
|
|
@ -397,7 +396,7 @@ class ShardedTensor(ShardedTensorBase):
|
|||
return reduce(operator.mul, shard_md.shard_sizes) # type: ignore[attr-defined]
|
||||
|
||||
if enforce_dtype:
|
||||
warnings.warn("`enforce_dtype` is deprecated. Please use `dtype` instead.", FutureWarning)
|
||||
warnings.warn("enforce_dtype is deprecated. Please use dtype instead.")
|
||||
|
||||
rank = dist.get_rank(self._process_group)
|
||||
full_size = self.metadata().size
|
||||
|
|
@ -738,7 +737,6 @@ class ShardedTensor(ShardedTensorBase):
|
|||
return sharded_tensor
|
||||
|
||||
@classmethod
|
||||
@deprecated(DEPRECATE_MSG, category=FutureWarning)
|
||||
def _init_from_local_tensor(
|
||||
cls,
|
||||
local_tensor: torch.Tensor,
|
||||
|
|
@ -803,6 +801,8 @@ class ShardedTensor(ShardedTensorBase):
|
|||
We fully rely on the user to ensure local tensor is sharded based on the
|
||||
sharding spec.
|
||||
"""
|
||||
warnings.warn(DEPRECATE_MSG)
|
||||
|
||||
if not local_tensor.is_contiguous():
|
||||
raise ValueError('local_tensor is not a contiguous Tensor.')
|
||||
|
||||
|
|
@ -980,7 +980,6 @@ class ShardedTensor(ShardedTensorBase):
|
|||
"""
|
||||
return self._sharding_spec
|
||||
|
||||
@deprecated(DEPRECATE_MSG, category=FutureWarning)
|
||||
def reshard(self, resharding_spec: shard_spec.ShardingSpec) -> ShardedTensor:
|
||||
"""
|
||||
Reshard a sharded tensor given the ``resharding_spec``. For now, we only support
|
||||
|
|
@ -1051,6 +1050,8 @@ class ShardedTensor(ShardedTensorBase):
|
|||
tensor([[3], [3], [5], [5], [7], [7], [9], [9]]) # Rank 2
|
||||
tensor([[4], [4], [6], [6], [8], [8], [10], [10]]) # Rank 3
|
||||
"""
|
||||
warnings.warn(DEPRECATE_MSG)
|
||||
|
||||
if (
|
||||
not isinstance(resharding_spec, shard_spec.ChunkShardingSpec) or
|
||||
not isinstance(self._sharding_spec, shard_spec.ChunkShardingSpec)
|
||||
|
|
@ -1095,7 +1096,6 @@ class ShardedTensor(ShardedTensorBase):
|
|||
return self.local_shards()[0].tensor
|
||||
|
||||
@classmethod
|
||||
@deprecated(DEPRECATE_MSG, category=FutureWarning)
|
||||
def __torch_function__(cls, func, types, args=(), kwargs=None):
|
||||
def dispatch(st: ShardedTensor, func: Callable):
|
||||
# Dispatch to custom user provided op first if it exists.
|
||||
|
|
@ -1120,6 +1120,7 @@ class ShardedTensor(ShardedTensorBase):
|
|||
f"torch function '{func.__name__}', with args: {args} and "
|
||||
f"kwargs: {kwargs} not supported for ShardedTensor!")
|
||||
|
||||
warnings.warn(DEPRECATE_MSG)
|
||||
# Find ShardedTensor instance to get process_group and sharding_spec.
|
||||
st_instance = None
|
||||
|
||||
|
|
|
|||
|
|
@ -5,14 +5,8 @@ import torch
|
|||
import warnings
|
||||
|
||||
from torch.distributed._shard.sharded_tensor import * # noqa: F403
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("always")
|
||||
warnings.warn(
|
||||
"`torch.distributed._sharded_tensor` will be deprecated, "
|
||||
"use `torch.distributed._shard.sharded_tensor` instead",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
warnings.warn(
|
||||
"torch.distributed._sharded_tensor will be deprecated, use torch.distributed._shard.sharded_tensor instead",
|
||||
DeprecationWarning
|
||||
)
|
||||
sys.modules['torch.distributed._sharded_tensor'] = torch.distributed._shard.sharded_tensor
|
||||
|
|
|
|||
|
|
@ -5,15 +5,10 @@ import torch
|
|||
import warnings
|
||||
|
||||
from torch.distributed._shard.sharding_spec import * # noqa: F403
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("always")
|
||||
warnings.warn(
|
||||
"`torch.distributed._sharding_spec` will be deprecated, "
|
||||
"use `torch.distributed._shard.sharding_spec` instead",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
warnings.warn(
|
||||
"torch.distributed._sharding_spec will be deprecated, use torch.distributed._shard.sharding_spec instead",
|
||||
DeprecationWarning
|
||||
)
|
||||
|
||||
import torch.distributed._shard.sharding_spec as _sharding_spec
|
||||
sys.modules['torch.distributed._sharding_spec'] = _sharding_spec
|
||||
|
|
|
|||
|
|
@ -746,7 +746,6 @@ def distribute_module(
|
|||
warnings.warn(
|
||||
"Deprecating input_fn that takes two arguments (inputs, device_mesh), "
|
||||
"please use input_fn that takes in (module, inputs, device_mesh) instead!",
|
||||
FutureWarning,
|
||||
)
|
||||
module.register_forward_pre_hook(lambda _, inputs: input_fn(inputs, device_mesh)) # type: ignore[call-arg]
|
||||
elif num_args == 3:
|
||||
|
|
@ -766,7 +765,6 @@ def distribute_module(
|
|||
warnings.warn(
|
||||
"Deprecating output_fn that takes two arguments (inputs, device_mesh), "
|
||||
"please use output_fn that takes in (module, inputs, device_mesh) instead!",
|
||||
FutureWarning,
|
||||
)
|
||||
module.register_forward_hook(
|
||||
lambda mod, inputs, outputs: output_fn(outputs, device_mesh) # type: ignore[call-arg]
|
||||
|
|
|
|||
|
|
@ -233,7 +233,6 @@ def checkpoint_wrapper(
|
|||
f"Please specify {CheckpointImpl.NO_REENTRANT} as "
|
||||
f"{CheckpointImpl.REENTRANT} will soon be removed as "
|
||||
"the default and eventually deprecated.",
|
||||
FutureWarning,
|
||||
stacklevel=1,
|
||||
)
|
||||
return CheckpointWrapper(
|
||||
|
|
|
|||
|
|
@ -85,7 +85,7 @@ def fp16_compress_hook(
|
|||
decompressed_tensor.copy_(value)
|
||||
return decompressed_tensor
|
||||
|
||||
if torch.compiler.is_compiling():
|
||||
if torch._utils.is_compiling():
|
||||
grad = dist._functional_collectives.all_reduce(
|
||||
compressed_tensor, "sum", group_to_use
|
||||
)
|
||||
|
|
@ -134,7 +134,7 @@ def bf16_compress_hook(
|
|||
decompressed_tensor.copy_(value)
|
||||
return decompressed_tensor
|
||||
|
||||
if torch.compiler.is_compiling():
|
||||
if torch._utils.is_compiling():
|
||||
grad = dist._functional_collectives.all_reduce(
|
||||
compressed_tensor, "sum", group_to_use
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
import os
|
||||
import warnings
|
||||
from typing import Any, cast, Dict, Optional, Set, Union
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
|
|
@ -18,11 +17,6 @@ from .utils import _all_gather_keys, _api_bc_check, _DistWrapper, _profile
|
|||
__all__ = ["load_state_dict", "load"]
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`load_state_dict` is deprecated and will be removed in future versions. "
|
||||
"Please use `load` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def load_state_dict(
|
||||
state_dict: Dict[str, Any],
|
||||
storage_reader: StorageReader,
|
||||
|
|
@ -32,6 +26,10 @@ def load_state_dict(
|
|||
planner: Optional[LoadPlanner] = None,
|
||||
) -> None:
|
||||
"""This method is deprecated. Please switch to 'load'."""
|
||||
warnings.warn(
|
||||
"'load_state_dict' is deprecated and will be removed in future versions. "
|
||||
"Please use 'load' instead."
|
||||
)
|
||||
storage_reader.reset()
|
||||
with _profile():
|
||||
# TODO: test returning `load` here instead.
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ import os
|
|||
import warnings
|
||||
from concurrent.futures import Future, ThreadPoolExecutor
|
||||
from typing import cast, Optional, Union
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
|
|
@ -25,11 +24,6 @@ from .utils import _api_bc_check, _DistWrapper, _profile
|
|||
__all__ = ["save_state_dict", "save", "async_save"]
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`save_state_dict` is deprecated and will be removed in future versions."
|
||||
"Please use `save` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def save_state_dict(
|
||||
state_dict: STATE_DICT_TYPE,
|
||||
storage_writer: StorageWriter,
|
||||
|
|
@ -39,6 +33,11 @@ def save_state_dict(
|
|||
planner: Optional[SavePlanner] = None,
|
||||
) -> Metadata:
|
||||
"""This method is deprecated. Please switch to 'save'."""
|
||||
warnings.warn(
|
||||
"'save_state_dict' is deprecated and will be removed in future versions."
|
||||
"Please use 'save' instead."
|
||||
)
|
||||
|
||||
storage_writer.reset()
|
||||
|
||||
# TODO: test returning `save` here instead.
|
||||
|
|
|
|||
|
|
@ -14,7 +14,6 @@ import warnings
|
|||
from collections import namedtuple
|
||||
from datetime import timedelta
|
||||
from typing import Any, Callable, Dict, Optional, Tuple, Union, List, TYPE_CHECKING
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
from torch._C._distributed_c10d import (
|
||||
|
|
@ -365,12 +364,11 @@ class _reduce_op:
|
|||
setattr(self, k, v)
|
||||
self.__members__ = ReduceOp.RedOpType.__members__
|
||||
|
||||
@deprecated(
|
||||
"`torch.distributed.reduce_op` is deprecated, "
|
||||
"please use `torch.distributed.ReduceOp` instead",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def __getattribute__(self, key):
|
||||
warnings.warn(
|
||||
"torch.distributed.reduce_op is deprecated, please use "
|
||||
"torch.distributed.ReduceOp instead"
|
||||
)
|
||||
return object.__getattribute__(self, key)
|
||||
|
||||
|
||||
|
|
@ -677,8 +675,7 @@ def _get_pg_default_device(group: Optional[ProcessGroup] = None) -> torch.device
|
|||
warnings.warn(
|
||||
f"You are using a Backend {type(group)} as a ProcessGroup. "
|
||||
"This usage is deprecated since PyTorch 2.0. Please use a public API "
|
||||
"of PyTorch Distributed instead.",
|
||||
FutureWarning,
|
||||
"of PyTorch Distributed instead."
|
||||
)
|
||||
# Most users create Gloo with private API for object collectives
|
||||
_world.pg_default_device[group] = torch.device("cpu")
|
||||
|
|
@ -832,15 +829,13 @@ def get_global_rank(group: ProcessGroup, group_rank: int) -> int:
|
|||
return rank
|
||||
raise ValueError(f"Group rank {group_rank} is not part of group {group}")
|
||||
|
||||
|
||||
# TODO: remove this once the ecosystem moves away from it.
|
||||
@deprecated(
|
||||
"`torch.distributed.distributed_c10d._get_global_rank` is deprecated, "
|
||||
"please use `torch.distributed.distributed_c10d.get_global_rank` instead",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def _get_global_rank(group, rank) -> int:
|
||||
"""Use get_global_rank as this method is deprecated."""
|
||||
warnings.warn(
|
||||
"torch.distributed.distributed_c10d._get_global_rank is deprecated "
|
||||
"please use torch.distributed.distributed_c10d.get_global_rank instead"
|
||||
)
|
||||
return get_global_rank(group, rank)
|
||||
|
||||
|
||||
|
|
@ -2291,12 +2286,6 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=None, async_op=False):
|
|||
work.wait()
|
||||
|
||||
@_exception_logger
|
||||
@deprecated(
|
||||
"`torch.distributed.all_reduce_coalesced` will be deprecated. If you must "
|
||||
"use it, please revisit our documentation later at "
|
||||
"https://pytorch.org/docs/main/distributed.html#collective-functions",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def all_reduce_coalesced(tensors, op=ReduceOp.SUM, group=None, async_op=False):
|
||||
"""
|
||||
WARNING: at this time individual shape checking is not implemented across nodes.
|
||||
|
|
@ -2331,6 +2320,11 @@ def all_reduce_coalesced(tensors, op=ReduceOp.SUM, group=None, async_op=False):
|
|||
None, if not async_op or if not part of the group.
|
||||
|
||||
"""
|
||||
warnings.warn(
|
||||
"torch.distributed.all_reduce_coalesced will be deprecated. If you must "
|
||||
"use it, please revisit our documentation later at "
|
||||
"https://pytorch.org/docs/main/distributed.html#collective-functions"
|
||||
)
|
||||
if isinstance(tensors, torch.Tensor):
|
||||
tensors = [tensors]
|
||||
_check_tensor_list(tensors, "tensor")
|
||||
|
|
@ -3204,11 +3198,6 @@ def all_gather_into_tensor(output_tensor, input_tensor, group=None, async_op=Fal
|
|||
|
||||
|
||||
@_exception_logger
|
||||
@deprecated(
|
||||
"`torch.distributed._all_gather_base` is a private function and will be deprecated. "
|
||||
"Please use `torch.distributed.all_gather_into_tensor` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def _all_gather_base(output_tensor, input_tensor, group=None, async_op=False):
|
||||
"""
|
||||
Single tensor all gather. Gathers a single tensor from all ranks, and puts them in a single output tensor.
|
||||
|
|
@ -3230,16 +3219,15 @@ def _all_gather_base(output_tensor, input_tensor, group=None, async_op=False):
|
|||
`all_gather_into_tensor` instead.
|
||||
|
||||
"""
|
||||
warnings.warn(
|
||||
"torch.distributed._all_gather_base is a private function and will be "
|
||||
"deprecated. Please use torch.distributed.all_gather_into_tensor "
|
||||
"instead."
|
||||
)
|
||||
return all_gather_into_tensor(output_tensor, input_tensor, group, async_op)
|
||||
|
||||
|
||||
@_exception_logger
|
||||
@deprecated(
|
||||
"`torch.distributed.all_gather_coalesced` will be deprecated. If you must use it, "
|
||||
"please revisit our documentation later at "
|
||||
"https://pytorch.org/docs/main/distributed.html#collective-functions",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def all_gather_coalesced(
|
||||
output_tensor_lists, input_tensor_list, group=None, async_op=False
|
||||
):
|
||||
|
|
@ -3286,6 +3274,11 @@ def all_gather_coalesced(
|
|||
performance improvements but users of this function should take extra care
|
||||
to ensure that each node passes in tensors whose shapes match across nodes.
|
||||
"""
|
||||
warnings.warn(
|
||||
"torch.distributed.all_gather_coalesced will be deprecated. If you must "
|
||||
"use it, please revisit our documentation later at "
|
||||
"https://pytorch.org/docs/main/distributed.html#collective-functions"
|
||||
)
|
||||
# We only check basic compatibility with C++ params here, C++ code will
|
||||
# do shape and type checking.
|
||||
if _rank_not_in_group(group):
|
||||
|
|
@ -3615,11 +3608,6 @@ def reduce_scatter_tensor(output, input, op=ReduceOp.SUM, group=None, async_op=F
|
|||
work.wait()
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.distributed._reduce_scatter_base` is a private function and will be deprecated. "
|
||||
"Please use `torch.distributed.reduce_scatter_tensor` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def _reduce_scatter_base(output, input, op=ReduceOp.SUM, group=None, async_op=False):
|
||||
"""
|
||||
Reduces, then scatters a flattened tensor to all processes in a group.
|
||||
|
|
@ -3640,6 +3628,11 @@ def _reduce_scatter_base(output, input, op=ReduceOp.SUM, group=None, async_op=Fa
|
|||
`reduce_scatter_tensor` instead.
|
||||
|
||||
"""
|
||||
warnings.warn(
|
||||
"torch.distributed._reduce_scatter_base is a private function and will "
|
||||
"be deprecated. Please use torch.distributed.reduce_scatter_tensor "
|
||||
"instead."
|
||||
)
|
||||
return reduce_scatter_tensor(output, input, op, group, async_op)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -8,10 +8,10 @@
|
|||
|
||||
import abc
|
||||
import time
|
||||
import warnings
|
||||
from collections import namedtuple
|
||||
from functools import wraps
|
||||
from typing import Dict, Optional
|
||||
from typing_extensions import deprecated
|
||||
|
||||
__all__ = ['MetricsConfig', 'MetricHandler', 'ConsoleMetricHandler', 'NullMetricHandler', 'MetricStream',
|
||||
'configure', 'getStream', 'prof', 'profile', 'put_metric', 'publish_metric', 'get_elapsed_time_ms',
|
||||
|
|
@ -137,7 +137,6 @@ def prof(fn=None, group: str = "torchelastic"):
|
|||
return wrap
|
||||
|
||||
|
||||
@deprecated("Deprecated, use `@prof` instead", category=FutureWarning)
|
||||
def profile(group=None):
|
||||
"""
|
||||
@profile decorator adds latency and success/failure metrics to any given function.
|
||||
|
|
@ -149,6 +148,8 @@ def profile(group=None):
|
|||
@metrics.profile("my_metric_group")
|
||||
def some_function(<arguments>):
|
||||
"""
|
||||
warnings.warn("Deprecated, use @prof instead", DeprecationWarning)
|
||||
|
||||
def wrap(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
|
|
@ -186,11 +187,10 @@ def put_metric(metric_name: str, metric_value: int, metric_group: str = "torchel
|
|||
getStream(metric_group).add_value(metric_name, metric_value)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"Deprecated, use `put_metric(metric_group)(metric_name, metric_value)` instead",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def publish_metric(metric_group: str, metric_name: str, metric_value: int):
|
||||
warnings.warn(
|
||||
"Deprecated, use put_metric(metric_group)(metric_name, metric_value) instead"
|
||||
)
|
||||
metric_stream = getStream(metric_group)
|
||||
metric_stream.add_value(metric_name, metric_value)
|
||||
|
||||
|
|
|
|||
|
|
@ -446,8 +446,7 @@ def _init_core_state(
|
|||
elif sharding_strategy == ShardingStrategy.NO_SHARD:
|
||||
warnings.warn(
|
||||
"The `NO_SHARD` sharding strategy is deprecated. If having issues, "
|
||||
"please use `DistributedDataParallel` instead.",
|
||||
FutureWarning,
|
||||
"please use DistributedDataParallel instead.",
|
||||
# Level 1 is here, level 2 is from `FullyShardedDataParallel`, and
|
||||
# level 3 is from the true caller
|
||||
stacklevel=3,
|
||||
|
|
|
|||
|
|
@ -1201,9 +1201,8 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
def _warn_optim_input(optim_input):
|
||||
if optim_input is not None:
|
||||
warnings.warn(
|
||||
"The `optim_input` argument is deprecated and will be removed after PyTorch 1.13. "
|
||||
"You may remove it from your code without changing its functionality.",
|
||||
FutureWarning,
|
||||
"The `optim_input` argument is deprecated and will be removed after PyTorch 1.13. You may remove it "
|
||||
"from your code without changing its functionality."
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -1222,8 +1221,7 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
|
|||
warnings.warn(
|
||||
f"``FullyShardedDataParallel.{curr}``is being deprecated and is "
|
||||
f"replaced by ``FullyShardedDataParallel.{new}``. "
|
||||
f"``FullyShardedDataParallel.{curr}`` may be removed after PyTorch 2.2.",
|
||||
FutureWarning,
|
||||
f"``FullyShardedDataParallel.{curr}`` may be removed after PyTorch 2.2."
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
|
|
|
|||
|
|
@ -159,7 +159,7 @@ will not pass ``--local-rank`` when you specify this flag.
|
|||
|
||||
"""
|
||||
|
||||
from typing_extensions import deprecated as _deprecated
|
||||
import warnings
|
||||
|
||||
from torch.distributed.run import get_args_parser, run
|
||||
|
||||
|
|
@ -188,17 +188,17 @@ def launch(args):
|
|||
run(args)
|
||||
|
||||
|
||||
@_deprecated(
|
||||
"The module torch.distributed.launch is deprecated\n"
|
||||
"and will be removed in future. Use torchrun.\n"
|
||||
"Note that --use-env is set by default in torchrun.\n"
|
||||
"If your script expects `--local-rank` argument to be set, please\n"
|
||||
"change it to read from `os.environ['LOCAL_RANK']` instead. See \n"
|
||||
"https://pytorch.org/docs/stable/distributed.html#launch-utility for \n"
|
||||
"further instructions\n",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def main(args=None):
|
||||
warnings.warn(
|
||||
"The module torch.distributed.launch is deprecated\n"
|
||||
"and will be removed in future. Use torchrun.\n"
|
||||
"Note that --use-env is set by default in torchrun.\n"
|
||||
"If your script expects `--local-rank` argument to be set, please\n"
|
||||
"change it to read from `os.environ['LOCAL_RANK']` instead. See \n"
|
||||
"https://pytorch.org/docs/stable/distributed.html#launch-utility for \n"
|
||||
"further instructions\n",
|
||||
FutureWarning,
|
||||
)
|
||||
args = parse_args(args)
|
||||
launch(args)
|
||||
|
||||
|
|
|
|||
|
|
@ -5,8 +5,6 @@ optimizer locally on the workers where the parameters live. The distributed
|
|||
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
|
||||
apply the gradients on each worker.
|
||||
"""
|
||||
import warnings
|
||||
|
||||
import torch
|
||||
from torch import optim
|
||||
|
||||
|
|
@ -26,15 +24,9 @@ from .functional_sgd import _FunctionalSGD
|
|||
from .named_optimizer import _NamedOptimizer
|
||||
from .utils import as_functional_optim
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("always")
|
||||
warnings.warn(
|
||||
"`TorchScript` support for functional optimizers is deprecated "
|
||||
"and will be removed in a future PyTorch release. "
|
||||
"Consider using the `torch.compile` optimizer instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
from warnings import warn
|
||||
warn("TorchScript support for functional optimizers is"
|
||||
"deprecated and will be removed in a future PyTorch release. Consider using the torch.compile optimizer instead.")
|
||||
|
||||
# DistributedOptimizer imports torch.distributed.rpc names, so gate availability
|
||||
# based on RPC being available.
|
||||
|
|
|
|||
|
|
@ -1,13 +1,7 @@
|
|||
import warnings
|
||||
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("always")
|
||||
warnings.warn(
|
||||
"`torch.distributed.pipeline` is deprecated. For up-to-date pipeline parallel "
|
||||
"implementation, please refer to the PiPPy library under the PyTorch "
|
||||
"organization (Pipeline Parallelism for PyTorch): "
|
||||
"https://github.com/pytorch/PiPPy",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
warnings.warn(
|
||||
"torch.distributed.pipeline is deprecated. For up-to-date pipeline parallel "
|
||||
"implementation, please refer to the PiPPy library under the PyTorch "
|
||||
"organization (Pipeline Parallelism for PyTorch): "
|
||||
"https://github.com/pytorch/PiPPy"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -22,10 +22,7 @@ def _deprecate_warnings(func_name: str, extra_msg: str) -> None:
|
|||
"""
|
||||
# TODO: Will follow up with dynamo POC to make warnings.warn working with dynamo.
|
||||
if not is_torchdynamo_compiling():
|
||||
warnings.warn(
|
||||
f"{func_name} is deprecated and will be removed soon. {extra_msg}",
|
||||
FutureWarning,
|
||||
)
|
||||
warnings.warn(f"{func_name} is deprecated and will be removed soon. {extra_msg}")
|
||||
|
||||
|
||||
def _validate_tp_mesh_dim(
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
import warnings
|
||||
from typing import Any, Dict, Optional, Tuple
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
from torch.distributions import constraints
|
||||
|
|
@ -172,15 +171,14 @@ class Distribution:
|
|||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@deprecated(
|
||||
"`sample_n(n)` will be deprecated. Use `sample((n,))` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def sample_n(self, n: int) -> torch.Tensor:
|
||||
"""
|
||||
Generates n samples or n batches of samples if the distribution
|
||||
parameters are batched.
|
||||
"""
|
||||
warnings.warn(
|
||||
"sample_n will be deprecated. Use .sample((n,)) instead", UserWarning
|
||||
)
|
||||
return self.sample(torch.Size((n,)))
|
||||
|
||||
def log_prob(self, value: torch.Tensor) -> torch.Tensor:
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
from warnings import warn
|
||||
import inspect
|
||||
from typing_extensions import deprecated
|
||||
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
|
||||
from .utils import expand_tuples
|
||||
from .variadic import Variadic, isvariadic
|
||||
|
|
@ -28,21 +27,24 @@ def ambiguity_warn(dispatcher, ambiguities):
|
|||
warn(warning_text(dispatcher.name, ambiguities), AmbiguityWarning)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`halt_ordering` is deprecated, you can safely remove this call.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def halt_ordering():
|
||||
"""Deprecated interface to temporarily disable ordering."""
|
||||
"""Deprecated interface to temporarily disable ordering.
|
||||
"""
|
||||
warn(
|
||||
'halt_ordering is deprecated, you can safely remove this call.',
|
||||
DeprecationWarning,
|
||||
)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`restart_ordering` is deprecated, if you would like to eagerly order the dispatchers, "
|
||||
"you should call the `reorder()` method on each dispatcher.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def restart_ordering(on_ambiguity=ambiguity_warn):
|
||||
"""Deprecated interface to temporarily resume ordering."""
|
||||
"""Deprecated interface to temporarily resume ordering.
|
||||
"""
|
||||
warn(
|
||||
'restart_ordering is deprecated, if you would like to eagerly order'
|
||||
'the dispatchers, you should call the ``reorder()`` method on each'
|
||||
' dispatcher.',
|
||||
DeprecationWarning,
|
||||
)
|
||||
|
||||
|
||||
def variadic_signature_matches_iter(types, full_signature):
|
||||
|
|
@ -314,12 +316,14 @@ class Dispatcher:
|
|||
result = self.funcs[signature]
|
||||
yield result
|
||||
|
||||
@deprecated("`resolve()` is deprecated, use `dispatch(*types)`", category=FutureWarning)
|
||||
def resolve(self, types):
|
||||
""" Determine appropriate implementation for this type signature
|
||||
.. deprecated:: 0.4.4
|
||||
Use ``dispatch(*types)`` instead
|
||||
"""
|
||||
warn("resolve() is deprecated, use dispatch(*types)",
|
||||
DeprecationWarning)
|
||||
|
||||
return self.dispatch(*types)
|
||||
|
||||
def __getstate__(self):
|
||||
|
|
|
|||
10
torch/hub.py
|
|
@ -13,7 +13,6 @@ import warnings
|
|||
import zipfile
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional, Any
|
||||
from typing_extensions import deprecated
|
||||
from urllib.error import HTTPError, URLError
|
||||
from urllib.request import urlopen, Request
|
||||
from urllib.parse import urlparse # noqa: F401
|
||||
|
|
@ -681,13 +680,10 @@ def _is_legacy_zip_format(filename: str) -> bool:
|
|||
return False
|
||||
|
||||
|
||||
@deprecated(
|
||||
'Falling back to the old format < 1.6. This support will be '
|
||||
'deprecated in favor of default zipfile format introduced in 1.6. '
|
||||
'Please redo torch.save() to save it in the new zipfile format.',
|
||||
category=FutureWarning,
|
||||
)
|
||||
def _legacy_zip_load(filename: str, model_dir: str, map_location: MAP_LOCATION, weights_only: bool) -> Dict[str, Any]:
|
||||
warnings.warn('Falling back to the old format < 1.6. This support will be '
|
||||
'deprecated in favor of default zipfile format introduced in 1.6. '
|
||||
'Please redo torch.save() to save it in the new zipfile format.')
|
||||
# Note: extractall() defaults to overwrite file if exists. No need to clean up beforehand.
|
||||
# We deliberately don't handle tarfile here since our legacy serialization format was in tar.
|
||||
# E.g. resnet18-5c106cde.pth which is widely used.
|
||||
|
|
|
|||
|
|
@ -1094,9 +1094,7 @@ def _script_impl(
|
|||
|
||||
if optimize is not None:
|
||||
warnings.warn(
|
||||
"`optimize` is deprecated and has no effect. "
|
||||
"Use `with torch.jit.optimized_execution()` instead",
|
||||
FutureWarning,
|
||||
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
|
||||
)
|
||||
|
||||
# No-op for modules, functions, class instances that are already scripted
|
||||
|
|
|
|||
|
|
@ -978,9 +978,7 @@ def trace(
|
|||
return func
|
||||
if optimize is not None:
|
||||
warnings.warn(
|
||||
"`optimize` is deprecated and has no effect. "
|
||||
"Use `with torch.jit.optimized_execution()` instead",
|
||||
FutureWarning,
|
||||
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
|
||||
)
|
||||
|
||||
from torch._utils_internal import (
|
||||
|
|
@ -1187,9 +1185,7 @@ def trace_module(
|
|||
return mod
|
||||
if optimize is not None:
|
||||
warnings.warn(
|
||||
"`optimize` is deprecated and has no effect. "
|
||||
"Use `with torch.jit.optimized_execution()` instead",
|
||||
FutureWarning,
|
||||
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
|
||||
)
|
||||
|
||||
var_lookup_fn = _create_interpreter_name_lookup_fn(0)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
from ._ops import OpOverload
|
||||
from typing import Any, Optional, Set, List, Union, Callable, Tuple, Dict, Sequence
|
||||
from typing_extensions import deprecated
|
||||
import traceback
|
||||
import torch
|
||||
import weakref
|
||||
|
|
@ -9,6 +8,7 @@ import inspect
|
|||
import re
|
||||
import contextlib
|
||||
import sys
|
||||
import warnings
|
||||
from torch._library.custom_ops import custom_op, _maybe_get_opdef, device_types_t, CustomOpDef
|
||||
import torch._library as _library
|
||||
|
||||
|
|
@ -451,15 +451,15 @@ def _(lib: Library, name, dispatch_key=""):
|
|||
return wrap
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.library.impl_abstract` was renamed to `torch.library.register_fake`. Please use that "
|
||||
"instead; we will remove `torch.library.impl_abstract` in a future version of PyTorch.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
|
||||
r"""This API was renamed to :func:`torch.library.register_fake` in PyTorch 2.4.
|
||||
Please use that instead.
|
||||
"""
|
||||
warnings.warn("torch.library.impl_abstract was renamed to "
|
||||
"torch.library.register_fake. Please use that instead; "
|
||||
"we will remove torch.library.impl_abstract in a future "
|
||||
"version of PyTorch.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
if func is not None:
|
||||
_stacklevel = _stacklevel + 1
|
||||
return register_fake(qualname, func, lib=lib, _stacklevel=_stacklevel)
|
||||
|
|
|
|||
|
|
@ -277,5 +277,5 @@ def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"):
|
|||
"To use a different start_method use:\n\t\t"
|
||||
" torch.multiprocessing.start_processes(...)"
|
||||
)
|
||||
warnings.warn(msg, FutureWarning)
|
||||
warnings.warn(msg)
|
||||
return start_processes(fn, args, nprocs, join, daemon, start_method="spawn")
|
||||
|
|
|
|||
|
|
@ -1818,8 +1818,7 @@ See :class:`~torch.nn.Softplus` for more details.
|
|||
|
||||
def _get_softmax_dim(name: str, ndim: int, stacklevel: int) -> int:
|
||||
warnings.warn(
|
||||
f"Implicit dimension choice for {name} has been deprecated. "
|
||||
"Change the call to include dim=X as an argument.",
|
||||
f"Implicit dimension choice for {name} has been deprecated. Change the call to include dim=X as an argument.",
|
||||
stacklevel=stacklevel,
|
||||
)
|
||||
if ndim == 0 or ndim == 1 or ndim == 3:
|
||||
|
|
@ -3824,10 +3823,7 @@ def upsample(input, size=None, scale_factor=None, mode="nearest", align_corners=
|
|||
affects the outputs.
|
||||
|
||||
"""
|
||||
warnings.warn(
|
||||
"`nn.functional.upsample` is deprecated. "
|
||||
"Use `nn.functional.interpolate` instead.",
|
||||
)
|
||||
warnings.warn("nn.functional.upsample is deprecated. Use nn.functional.interpolate instead.")
|
||||
return interpolate(input, size, scale_factor, mode, align_corners)
|
||||
|
||||
|
||||
|
|
@ -4147,10 +4143,7 @@ def upsample_nearest(input, size=None, scale_factor=None): # noqa: F811
|
|||
{backward_reproducibility_note}
|
||||
"""
|
||||
# DeprecationWarning is ignored by default
|
||||
warnings.warn(
|
||||
"`nn.functional.upsample_nearest` is deprecated. "
|
||||
"Use `nn.functional.interpolate` instead.",
|
||||
)
|
||||
warnings.warn("nn.functional.upsample_nearest is deprecated. Use nn.functional.interpolate instead.")
|
||||
return interpolate(input, size, scale_factor, mode="nearest")
|
||||
|
||||
|
||||
|
|
@ -4206,10 +4199,7 @@ def upsample_bilinear(input, size=None, scale_factor=None): # noqa: F811
|
|||
{backward_reproducibility_note}
|
||||
"""
|
||||
# DeprecationWarning is ignored by default
|
||||
warnings.warn(
|
||||
"`nn.functional.upsample_bilinear` is deprecated. "
|
||||
"Use `nn.functional.interpolate` instead.",
|
||||
)
|
||||
warnings.warn("nn.functional.upsample_bilinear is deprecated. Use nn.functional.interpolate instead.")
|
||||
return interpolate(input, size, scale_factor, mode="bilinear", align_corners=True)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -599,11 +599,7 @@ def _make_deprecate(meth):
|
|||
old_name = new_name[:-1]
|
||||
|
||||
def deprecated_init(*args, **kwargs):
|
||||
warnings.warn(
|
||||
f"`nn.init.{old_name}` is now deprecated in favor of `nn.init.{new_name}`.",
|
||||
FutureWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
warnings.warn(f"nn.init.{old_name} is now deprecated in favor of nn.init.{new_name}.", stacklevel=2)
|
||||
return meth(*args, **kwargs)
|
||||
|
||||
deprecated_init.__doc__ = fr"""
|
||||
|
|
|
|||
|
|
@ -219,16 +219,10 @@ class Hardtanh(Module):
|
|||
) -> None:
|
||||
super().__init__()
|
||||
if min_value is not None:
|
||||
warnings.warn(
|
||||
"keyword argument `min_value` is deprecated and rename to `min_val`",
|
||||
FutureWarning,
|
||||
)
|
||||
warnings.warn("keyword argument min_value is deprecated and rename to min_val")
|
||||
min_val = min_value
|
||||
if max_value is not None:
|
||||
warnings.warn(
|
||||
"keyword argument `max_value` is deprecated and rename to `max_val`",
|
||||
FutureWarning,
|
||||
)
|
||||
warnings.warn("keyword argument max_value is deprecated and rename to max_val")
|
||||
max_val = max_value
|
||||
|
||||
self.min_val = min_val
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
import warnings
|
||||
from collections import OrderedDict, abc as container_abcs
|
||||
from itertools import chain, islice
|
||||
import operator
|
||||
|
|
@ -9,7 +10,6 @@ from torch._jit_internal import _copy_to_script_wrapper
|
|||
|
||||
from typing import Any, Dict, Iterable, Iterator, Mapping, Optional, overload, Tuple, TypeVar, Union
|
||||
from typing_extensions import Self
|
||||
from typing_extensions import deprecated
|
||||
|
||||
__all__ = ['Container', 'Sequential', 'ModuleList', 'ModuleDict', 'ParameterList', 'ParameterDict']
|
||||
|
||||
|
|
@ -29,14 +29,13 @@ def _addindent(s_, numSpaces):
|
|||
return s
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`nn.Container` is deprecated. "
|
||||
"All of it's functionality is now implemented in `nn.Module`. Subclass that instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
class Container(Module):
|
||||
|
||||
def __init__(self, **kwargs: Any) -> None:
|
||||
super().__init__()
|
||||
# DeprecationWarning is ignored by default <sigh>
|
||||
warnings.warn("nn.Container is deprecated. All of it's functionality "
|
||||
"is now implemented in nn.Module. Subclass that instead.")
|
||||
for key, value in kwargs.items():
|
||||
self.add_module(key, value)
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import math
|
||||
import warnings
|
||||
|
||||
import torch
|
||||
from torch import Tensor
|
||||
|
|
@ -12,7 +13,6 @@ from torch._torch_docs import reproducibility_notes
|
|||
|
||||
from ..common_types import _size_1_t, _size_2_t, _size_3_t
|
||||
from typing import Optional, List, Tuple, Union
|
||||
from typing_extensions import deprecated
|
||||
|
||||
__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d',
|
||||
'LazyConv1d', 'LazyConv2d', 'LazyConv3d', 'LazyConvTranspose1d', 'LazyConvTranspose2d',
|
||||
|
|
@ -40,6 +40,9 @@ convolution_notes = \
|
|||
:math:`(C_\text{in}=C_\text{in}, C_\text{out}=C_\text{in} \times \text{K}, ..., \text{groups}=C_\text{in})`."""} # noqa: B950
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class _ConvNd(Module):
|
||||
|
||||
__constants__ = ['stride', 'padding', 'dilation', 'groups',
|
||||
|
|
@ -607,6 +610,7 @@ class Conv3d(_ConvNd):
|
|||
return self._conv_forward(input, self.weight, self.bias)
|
||||
|
||||
|
||||
|
||||
class _ConvTransposeNd(_ConvNd):
|
||||
def __init__(self, in_channels, out_channels, kernel_size, stride,
|
||||
padding, dilation, transposed, output_padding,
|
||||
|
|
@ -1117,13 +1121,10 @@ class ConvTranspose3d(_ConvTransposeNd):
|
|||
# `_ConvTransposeNd` is really not a mixin anymore (but multiple inheritance as
|
||||
# above would still work).
|
||||
class _ConvTransposeMixin(_ConvTransposeNd):
|
||||
|
||||
@deprecated(
|
||||
"`_ConvTransposeMixin` is a deprecated internal class. "
|
||||
"Please consider using public APIs.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def __init__(self, *args, **kwargs):
|
||||
warnings.warn(
|
||||
"_ConvTransposeMixin is a deprecated internal class. "
|
||||
"Please consider using public APIs.")
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
import warnings
|
||||
|
||||
from .distance import PairwiseDistance
|
||||
from .module import Module
|
||||
from .. import functional as F
|
||||
|
|
@ -5,7 +7,6 @@ from .. import _reduction as _Reduction
|
|||
|
||||
from torch import Tensor
|
||||
from typing import Callable, Optional
|
||||
from typing_extensions import deprecated
|
||||
|
||||
__all__ = ['L1Loss', 'NLLLoss', 'NLLLoss2d', 'PoissonNLLLoss', 'GaussianNLLLoss', 'KLDivLoss',
|
||||
'MSELoss', 'BCELoss', 'BCEWithLogitsLoss', 'HingeEmbeddingLoss', 'MultiLabelMarginLoss',
|
||||
|
|
@ -217,15 +218,12 @@ class NLLLoss(_WeightedLoss):
|
|||
return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`NLLLoss2d` has been deprecated. "
|
||||
"Please use `NLLLoss` instead as a drop-in replacement and see "
|
||||
"https://pytorch.org/docs/main/nn.html#torch.nn.NLLLoss for more details.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
class NLLLoss2d(NLLLoss):
|
||||
def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
|
||||
reduce=None, reduction: str = 'mean') -> None:
|
||||
warnings.warn("NLLLoss2d has been deprecated. "
|
||||
"Please use NLLLoss instead as a drop-in replacement and see "
|
||||
"https://pytorch.org/docs/main/nn.html#torch.nn.NLLLoss for more details.")
|
||||
super().__init__(weight, size_average, ignore_index, reduce, reduction)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1346,8 +1346,7 @@ class Module:
|
|||
warnings.warn("Using non-full backward hooks on a Module that does not return a "
|
||||
"single Tensor or a tuple of Tensors is deprecated and will be removed "
|
||||
"in future versions. This hook will be missing some of the grad_output. "
|
||||
"Please use register_full_backward_hook to get the documented behavior.",
|
||||
FutureWarning)
|
||||
"Please use register_full_backward_hook to get the documented behavior.")
|
||||
return
|
||||
else:
|
||||
result = (result,)
|
||||
|
|
@ -1357,8 +1356,7 @@ class Module:
|
|||
warnings.warn("Using non-full backward hooks on a Module that does not take as input a "
|
||||
"single Tensor or a tuple of Tensors is deprecated and will be removed "
|
||||
"in future versions. This hook will be missing some of the grad_input. "
|
||||
"Please use register_full_backward_hook to get the documented behavior.",
|
||||
FutureWarning)
|
||||
"Please use register_full_backward_hook to get the documented behavior.")
|
||||
return
|
||||
else:
|
||||
inputs = (inputs,)
|
||||
|
|
@ -1368,13 +1366,11 @@ class Module:
|
|||
if len(out_grad_fn) == 0 or (len(out_grad_fn) == 1 and grad_fn not in out_grad_fn):
|
||||
warnings.warn("Using a non-full backward hook when outputs are nested in python data structure "
|
||||
"is deprecated and will be removed in future versions. This hook will be missing "
|
||||
"some grad_output.",
|
||||
FutureWarning)
|
||||
"some grad_output.")
|
||||
elif len(out_grad_fn) > 1:
|
||||
warnings.warn("Using a non-full backward hook when outputs are generated by different autograd Nodes "
|
||||
"is deprecated and will be removed in future versions. This hook will be missing "
|
||||
"some grad_output. Please use register_full_backward_hook to get the documented behavior.",
|
||||
FutureWarning)
|
||||
"some grad_output. Please use register_full_backward_hook to get the documented behavior.")
|
||||
else:
|
||||
# At this point the grad_output part of the hook will most likely be correct
|
||||
inputs_grad_fn = {i.grad_fn for i in inputs if i.grad_fn is not None}
|
||||
|
|
@ -1385,8 +1381,7 @@ class Module:
|
|||
warnings.warn("Using a non-full backward hook when the forward contains multiple autograd Nodes "
|
||||
"is deprecated and will be removed in future versions. This hook will be missing "
|
||||
"some grad_input. Please use register_full_backward_hook to get the documented "
|
||||
"behavior.",
|
||||
FutureWarning)
|
||||
"behavior.")
|
||||
|
||||
def register_forward_pre_hook(
|
||||
self,
|
||||
|
|
@ -1910,9 +1905,7 @@ class Module:
|
|||
warnings.warn(
|
||||
"Positional args are being deprecated, use kwargs instead. Refer to "
|
||||
"https://pytorch.org/docs/main/generated/torch.nn.Module.html#torch.nn.Module.state_dict"
|
||||
" for details.",
|
||||
FutureWarning,
|
||||
)
|
||||
" for details.")
|
||||
|
||||
if destination is None:
|
||||
destination = OrderedDict()
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ import warnings
|
|||
import numbers
|
||||
import weakref
|
||||
from typing import List, Tuple, Optional, overload
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
from torch import Tensor
|
||||
|
|
@ -25,11 +24,8 @@ def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Ten
|
|||
return tensor.index_select(dim, permutation)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`apply_permutation` is deprecated, please use `tensor.index_select(dim, permutation)` instead",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
|
||||
warnings.warn("apply_permutation is deprecated, please use tensor.index_select(dim, permutation) instead")
|
||||
return _apply_permutation(tensor, permutation, dim)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,5 +1,3 @@
|
|||
from typing_extensions import deprecated
|
||||
|
||||
from .parallel_apply import parallel_apply
|
||||
from .replicate import replicate
|
||||
from .data_parallel import DataParallel, data_parallel
|
||||
|
|
@ -9,11 +7,8 @@ from .distributed import DistributedDataParallel
|
|||
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
|
||||
'DataParallel', 'DistributedDataParallel']
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.nn.parallel.DistributedDataParallelCPU` is deprecated, "
|
||||
"please use `torch.nn.parallel.DistributedDataParallel` instead.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def DistributedDataParallelCPU(*args, **kwargs):
|
||||
import warnings
|
||||
warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, "
|
||||
"please use torch.nn.parallel.DistributedDataParallel instead.")
|
||||
return DistributedDataParallel(*args, **kwargs)
|
||||
|
|
|
|||
|
|
@ -226,9 +226,7 @@ def gather(tensors, dim=0, destination=None, *, out=None):
|
|||
if destination == -1:
|
||||
warnings.warn(
|
||||
'Using -1 to represent CPU tensor is deprecated. Please use a '
|
||||
'device object or string instead, e.g., "cpu".',
|
||||
FutureWarning,
|
||||
)
|
||||
'device object or string instead, e.g., "cpu".')
|
||||
destination = _get_device_index(destination, allow_cpu=True, optional=True)
|
||||
return torch._C._gather(tensors, dim, destination)
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -771,8 +771,7 @@ class DistributedDataParallel(Module, Joinable):
|
|||
# do not receive gradients.
|
||||
warnings.warn(
|
||||
"The `check_reduction` argument in `DistributedDataParallel` "
|
||||
"module is deprecated. Please avoid using it.",
|
||||
FutureWarning,
|
||||
"module is deprecated. Please avoid using it."
|
||||
)
|
||||
|
||||
# Check that a module does not have Uninitialized parameters
|
||||
|
|
@ -1467,7 +1466,7 @@ class DistributedDataParallel(Module, Joinable):
|
|||
|
||||
def _should_disable_cpp_reducer(self) -> bool:
|
||||
return self._use_python_reducer and (
|
||||
torch.compiler.is_compiling() or self._force_to_disable_cpp_reducer
|
||||
torch._utils.is_compiling() or self._force_to_disable_cpp_reducer
|
||||
)
|
||||
|
||||
def _pre_forward(self, *inputs, **kwargs):
|
||||
|
|
@ -1480,7 +1479,7 @@ class DistributedDataParallel(Module, Joinable):
|
|||
h.remove()
|
||||
self._accum_grad_hooks.clear()
|
||||
|
||||
if not self._lazy_init_ran and not torch.compiler.is_compiling():
|
||||
if not self._lazy_init_ran and not torch._utils.is_compiling():
|
||||
self._lazy_init()
|
||||
|
||||
if self._delay_all_reduce_all_params:
|
||||
|
|
|
|||
|
|
@ -1,17 +1,13 @@
|
|||
import torch
|
||||
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union, overload
|
||||
from typing_extensions import deprecated
|
||||
from ._functions import Scatter, Gather
|
||||
import warnings
|
||||
|
||||
__all__ = ['scatter', 'scatter_kwargs', 'gather']
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`is_namedtuple` is deprecated, please use the python checks instead",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def is_namedtuple(obj: Any) -> bool:
|
||||
# Check if type was created from collections.namedtuple or a typing.NamedTuple.
|
||||
warnings.warn("is_namedtuple is deprecated, please use the python checks instead")
|
||||
return _is_namedtuple(obj)
|
||||
|
||||
def _is_namedtuple(obj: Any) -> bool:
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
import warnings
|
||||
import functools
|
||||
from typing import Union, Iterable, List, Dict, Tuple, Optional, cast
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
from torch import Tensor
|
||||
|
|
@ -99,11 +99,6 @@ def clip_grad_norm_(
|
|||
return total_norm
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.nn.utils.clip_grad_norm` is now deprecated "
|
||||
"in favor of `torch.nn.utils.clip_grad_norm_`.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def clip_grad_norm(
|
||||
parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.,
|
||||
error_if_nonfinite: bool = False, foreach: Optional[bool] = None) -> torch.Tensor:
|
||||
|
|
@ -113,6 +108,8 @@ def clip_grad_norm(
|
|||
This method is now deprecated in favor of
|
||||
:func:`torch.nn.utils.clip_grad_norm_`.
|
||||
"""
|
||||
warnings.warn("torch.nn.utils.clip_grad_norm is now deprecated in favor "
|
||||
"of torch.nn.utils.clip_grad_norm_.", stacklevel=2)
|
||||
return clip_grad_norm_(parameters, max_norm, norm_type, error_if_nonfinite, foreach)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
import contextlib
|
||||
import warnings
|
||||
from collections import defaultdict
|
||||
from typing import Any, Dict, Iterator, Optional, Set, Tuple, Union
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import torch
|
||||
from torch import Tensor
|
||||
|
|
@ -148,12 +148,6 @@ def _reparametrize_module(
|
|||
)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"`torch.nn.utils.stateless.functional_call` is deprecated as of PyTorch 2.0 "
|
||||
"and will be removed in a future version of PyTorch. "
|
||||
"Please use `torch.func.functional_call` instead which is a drop-in replacement.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def functional_call(
|
||||
module: "torch.nn.Module",
|
||||
parameters_and_buffers: Dict[str, Tensor],
|
||||
|
|
@ -222,6 +216,12 @@ def functional_call(
|
|||
Returns:
|
||||
Any: the result of calling ``module``.
|
||||
"""
|
||||
warnings.warn(
|
||||
"This API is deprecated as of PyTorch 2.0 and will be removed in a future "
|
||||
"version of PyTorch. Please use torch.func.functional_call instead "
|
||||
"which is a drop-in replacement for this API."
|
||||
)
|
||||
|
||||
return _functional_call(
|
||||
module,
|
||||
parameters_and_buffers,
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ r"""Weight Normalization from https://arxiv.org/abs/1602.07868."""
|
|||
from torch.nn.parameter import Parameter, UninitializedParameter
|
||||
from torch import _weight_norm, norm_except_dim
|
||||
from typing import Any, TypeVar
|
||||
from typing_extensions import deprecated
|
||||
import warnings
|
||||
from ..modules import Module
|
||||
|
||||
__all__ = ['WeightNorm', 'weight_norm', 'remove_weight_norm']
|
||||
|
|
@ -24,12 +24,9 @@ class WeightNorm:
|
|||
return _weight_norm(v, g, self.dim)
|
||||
|
||||
@staticmethod
|
||||
@deprecated(
|
||||
"`torch.nn.utils.weight_norm` is deprecated "
|
||||
"in favor of `torch.nn.utils.parametrizations.weight_norm`.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
def apply(module, name: str, dim: int) -> 'WeightNorm':
|
||||
warnings.warn("torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.")
|
||||
|
||||
for hook in module._forward_pre_hooks.values():
|
||||
if isinstance(hook, WeightNorm) and hook.name == name:
|
||||
raise RuntimeError(f"Cannot register two weight_norm hooks on the same parameter {name}")
|
||||
|
|
|
|||
|
|
@ -254,7 +254,7 @@ def _single_tensor_adadelta(
|
|||
has_complex: bool,
|
||||
):
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices(
|
||||
supports_xla=False
|
||||
)
|
||||
|
|
@ -310,7 +310,7 @@ def _multi_tensor_adadelta(
|
|||
assert not differentiable, "_foreach ops don't support autograd"
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices(
|
||||
supports_xla=False
|
||||
)
|
||||
|
|
@ -413,7 +413,7 @@ def adadelta(
|
|||
|
||||
# this check is slow during compilation, so we skip it
|
||||
# if it's strictly needed we can add this check back in dynamo
|
||||
if not torch.compiler.is_compiling() and not all(
|
||||
if not torch._utils.is_compiling() and not all(
|
||||
isinstance(t, torch.Tensor) for t in state_steps
|
||||
):
|
||||
raise RuntimeError(
|
||||
|
|
|
|||
|
|
@ -353,7 +353,7 @@ def _single_tensor_adam(
|
|||
step_t = state_steps[i]
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices()
|
||||
assert (
|
||||
param.device.type == step_t.device.type
|
||||
|
|
@ -466,7 +466,7 @@ def _multi_tensor_adam(
|
|||
)
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices(
|
||||
supports_xla=False
|
||||
)
|
||||
|
|
@ -743,7 +743,7 @@ def adam(
|
|||
|
||||
# this check is slow during compilation, so we skip it
|
||||
# if it's strictly needed we can add this check back in dynamo
|
||||
if not torch.compiler.is_compiling() and not all(
|
||||
if not torch._utils.is_compiling() and not all(
|
||||
isinstance(t, torch.Tensor) for t in state_steps
|
||||
):
|
||||
raise RuntimeError(
|
||||
|
|
|
|||
|
|
@ -243,7 +243,7 @@ def _single_tensor_adamax(
|
|||
step_t = state_steps[i]
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices()
|
||||
assert (
|
||||
param.device.type == step_t.device.type
|
||||
|
|
@ -315,7 +315,7 @@ def _multi_tensor_adamax(
|
|||
return
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices(
|
||||
supports_xla=False
|
||||
)
|
||||
|
|
@ -424,7 +424,7 @@ def adamax(
|
|||
See :class:`~torch.optim.Adamax` for details.
|
||||
"""
|
||||
|
||||
if not torch.compiler.is_compiling() and not all(
|
||||
if not torch._utils.is_compiling() and not all(
|
||||
isinstance(t, torch.Tensor) for t in state_steps
|
||||
):
|
||||
raise RuntimeError(
|
||||
|
|
|
|||
|
|
@ -354,7 +354,7 @@ def _single_tensor_adamw(
|
|||
step_t = state_steps[i]
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices()
|
||||
assert (
|
||||
param.device.type == step_t.device.type
|
||||
|
|
@ -467,7 +467,7 @@ def _multi_tensor_adamw(
|
|||
)
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices(
|
||||
supports_xla=False
|
||||
)
|
||||
|
|
@ -728,7 +728,7 @@ def adamw(
|
|||
|
||||
See :class:`~torch.optim.AdamW` for details.
|
||||
"""
|
||||
if not torch.compiler.is_compiling() and not all(
|
||||
if not torch._utils.is_compiling() and not all(
|
||||
isinstance(t, torch.Tensor) for t in state_steps
|
||||
):
|
||||
raise RuntimeError(
|
||||
|
|
|
|||
|
|
@ -214,7 +214,7 @@ def _single_tensor_asgd(
|
|||
step_t = state_steps[i]
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices()
|
||||
assert (
|
||||
param.device.type
|
||||
|
|
@ -287,7 +287,7 @@ def _multi_tensor_asgd(
|
|||
assert not differentiable, "_foreach ops don't support autograd"
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices(
|
||||
supports_xla=False
|
||||
)
|
||||
|
|
|
|||
|
|
@ -304,7 +304,7 @@ def _single_tensor_nadam(
|
|||
exp_avg_sq = torch.view_as_real(exp_avg_sq)
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices()
|
||||
assert (
|
||||
param.device.type == mu_product.device.type == step_t.device.type
|
||||
|
|
@ -390,7 +390,7 @@ def _multi_tensor_nadam(
|
|||
assert not differentiable, "_foreach ops don't support autograd"
|
||||
|
||||
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
||||
if not torch.compiler.is_compiling() and capturable:
|
||||
if not torch._utils.is_compiling() and capturable:
|
||||
capturable_supported_devices = _get_capturable_supported_devices(
|
||||
supports_xla=False
|
||||
)
|
||||
|
|
|
|||
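For context, a minimal sketch (not part of this diff; the function names are hypothetical) of the two deprecation styles being swapped throughout the hunks above: the typing_extensions.deprecated decorator form being removed, and the plain warnings.warn form being restored. The decorator form assumes typing-extensions >= 4.5.

    import warnings
    from typing_extensions import deprecated  # hypothetical example; needs typing-extensions >= 4.5

    # Decorator style: the warning is declared on the function and emitted on each call.
    @deprecated("old_scale() is deprecated, use new_scale() instead.", category=FutureWarning)
    def old_scale_decorated(x):
        return x * 2

    # Plain-warning style (restored by this revert): the warning is emitted
    # explicitly inside the function body.
    def old_scale_warned(x):
        warnings.warn("old_scale() is deprecated, use new_scale() instead.", FutureWarning)
        return x * 2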
Some files were not shown because too many files have changed in this diff.