Add torch.utils to the docs page, remove dead code and fix docstrings (#105142)

As per title.
Note that the C++-side code for the minidumps feature was removed, so calling any of these three functions today results in an error saying that `torch._C` doesn't have these attributes.
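For context, a minimal sketch (not part of the commit) of that failure mode, assuming a build where the C++ bindings are already gone but the Python wrappers still exist:

```python
import torch
import torch.utils

try:
    # The pure-Python wrapper still existed, but it forwarded to a
    # torch._C binding that no longer exists.
    torch.utils.enable_minidumps()
except AttributeError as err:
    print(err)  # e.g. module 'torch._C' has no attribute '_enable_minidumps'
```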
Pull Request resolved: https://github.com/pytorch/pytorch/pull/105142
Approved by: https://github.com/janeyx99
albanD 2023-07-21 00:14:55 +00:00 committed by PyTorch MergeBot
parent 1e87778552
commit e985719e98
9 changed files with 56 additions and 86 deletions

@@ -238,7 +238,6 @@ coverage_ignore_classes = [
"StringType",
"SymIntType",
"SymBoolType",
"ThroughputBenchmark",
"TracingState",
"TupleType",
"Type",

@@ -131,6 +131,7 @@ Features described in this documentation are classified by release status:
sparse
storage
torch.testing <testing>
torch.utils <utils>
torch.utils.benchmark <benchmark_utils>
torch.utils.bottleneck <bottleneck>
torch.utils.checkpoint <checkpoint>

@@ -725,9 +725,6 @@ Operator Tags
.. py:module:: torch.contrib
.. py:module:: torch.utils.backcompat
.. This submodule is split manually without a top level page.
.. py:module:: torch.utils
.. This module is only used internally for ROCm builds.
.. py:module:: torch.utils.hipify
@@ -735,14 +732,3 @@ Operator Tags
.. for tracking purposes
.. py:module:: torch.utils.model_dump
.. py:module:: torch.utils.viz
.. automodule:: torch.autograd
.. currentmodule:: torch.autograd
Engine Configuration
----------------------------------
.. autosummary::
:toctree: generated
:nosignatures:
set_multithreading_enabled

docs/source/utils.rst (new file, +13 lines)
@@ -0,0 +1,13 @@
torch.utils
===================================
.. automodule:: torch.utils
.. currentmodule:: torch.utils
.. autosummary::
:toctree: generated
:nosignatures:
rename_privateuse1_backend
generate_methods_for_privateuse1_backend
get_cpp_backtrace
set_module
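
A hedged usage sketch for one of the newly documented helpers (not part of the commit; the no-argument form of `get_cpp_backtrace` returning a string is assumed from the current API):

```python
import torch

# Dump the C++ call stack of the current thread -- handy when debugging
# hangs or unexpected native behaviour from Python.
backtrace = torch.utils.get_cpp_backtrace()  # assumed: no-arg call returns a str
print(backtrace)
```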

@@ -998,11 +998,6 @@
"Tuple",
"Union"
],
"torch.utils": [
"disable_minidumps",
"enable_minidumps",
"enable_minidumps_on_exceptions"
],
"torch.utils.benchmark.utils.compare": [
"Colorize",
"Table",

@@ -1905,9 +1905,6 @@ def _c10d_init() -> _bool: ...
# Defined in torch/csrc/distributed/rpc/testing/init.cpp
def _faulty_agent_init() -> _bool: ...
def _enable_minidumps(directory: str) -> None: ...
def _disable_minidumps() -> None: ...
def _enable_minidumps_on_exceptions() -> None: ...
def _register_py_class_for_device(device: str, cls: Any) -> None: ...
def _activate_cuda_trace() -> None: ...

@@ -2,12 +2,13 @@ import os.path as _osp
import torch
from .throughput_benchmark import ThroughputBenchmark
from ._crash_handler import enable_minidumps, disable_minidumps, enable_minidumps_on_exceptions
from .cpp_backtrace import get_cpp_backtrace
from .backend_registration import rename_privateuse1_backend, generate_methods_for_privateuse1_backend
# Set the module for a given object for nicer printing
def set_module(obj, mod):
"""
Set the module attribute on a Python object for nicer printing
"""
if not isinstance(mod, str):
raise TypeError("The mod argument should be a string")
obj.__module__ = mod
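
A quick usage sketch for `set_module` (illustrative, not part of the commit), matching the code above:

```python
import torch

def my_helper():
    pass

# Make the helper print as if it lived in a public module.
torch.utils.set_module(my_helper, "torch.utils")
print(my_helper.__module__)  # -> torch.utils

# Passing a non-string module name raises TypeError, as enforced above.
```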

@@ -1,25 +0,0 @@
import os
import sys
import pathlib
import torch
DEFAULT_MINIDUMP_DIR = "/tmp/pytorch_crashes"
if sys.platform == "win32":
DEFAULT_MINIDUMP_DIR = str(pathlib.Path.home() / "AppData" / "pytorch_crashes")
def enable_minidumps(directory=DEFAULT_MINIDUMP_DIR):
if directory == DEFAULT_MINIDUMP_DIR:
pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
elif not os.path.exists(directory):
raise RuntimeError(f"Directory does not exist: {directory}")
torch._C._enable_minidumps(directory)
def enable_minidumps_on_exceptions():
torch._C._enable_minidumps_on_exceptions()
def disable_minidumps():
torch._C._disable_minidumps()

@@ -14,11 +14,11 @@ def rename_privateuse1_backend(backend_name: str) -> None:
r"""
rename_privateuse1_backend(backend_name) -> None
Note: support the custom device with privateuse1
This is a registration API for external backends that would like to register their
own device and C++ kernels out of tree.
This API should be used to rename the privateuse1 backend device to make
it more convenient to use as a device name within PyTorch APIs.
The steps are:
(1) (In C++) implement kernels for various torch operations, and register them
to the PrivateUse1 dispatch key.
(2) (In python) call torch.register_privateuse1_backend("foo")
@@ -29,52 +29,54 @@ def rename_privateuse1_backend(backend_name: str) -> None:
the external backend after it's already been set will result in an error.
Note(AMP): If you want to support AMP on your device, you can register a custom backend module.
The backend must register a custom backend module with `torch._register_device_module("foo", BackendModule)`.
The backend must register a custom backend module with ``torch._register_device_module("foo", BackendModule)``.
BackendModule needs to have the following API's:
(1) get_amp_supported_dtype() -> List[torch.dtype]
get the supported dtypes on your `foo` device in AMP, maybe the `foo` device supports one more dtype.
(1) ``get_amp_supported_dtype() -> List[torch.dtype]``
get the supported dtypes on your "foo" device in AMP, maybe the "foo" device supports one more dtype.
(2) is_autocast_enabled() -> bool
check the AMP is enabled or not on your `foo` device.
(2) ``is_autocast_enabled() -> bool``
check the AMP is enabled or not on your "foo" device.
(3) get_autocast_dtype() -> torch.dtype
get the supported dtype on your `foo` device in AMP, which is set by `set_autocast_dtype` or the
default dtype, and the default dtype is `torch.float16`.
(3) ``get_autocast_dtype() -> torch.dtype``
get the supported dtype on your "foo" device in AMP, which is set by ``set_autocast_dtype`` or the
default dtype, and the default dtype is ``torch.float16``.
(4) set_autocast_enabled(bool) -> None
enable the AMP or not on your `foo` device.
(4) ``set_autocast_enabled(bool) -> None``
enable the AMP or not on your "foo" device.
(5) set_autocast_dtype(dtype) -> None
set the supported dtype on your `foo` device in AMP, and the dtype be contained in the dtypes got
from `get_amp_supported_dtype`.
(5) ``set_autocast_dtype(dtype) -> None``
set the supported dtype on your "foo" device in AMP, and the dtype be contained in the dtypes got
from ``get_amp_supported_dtype``.
Note(random): If you want to support to set seed for your device, BackendModule needs to have the following API's:
(1) _is_in_bad_fork() -> bool
Return `True` if now it is in bad_fork, else return `False`.
(1) ``_is_in_bad_fork() -> bool``
Return ``True`` if now it is in bad_fork, else return ``False``.
(2) manual_seed_all(seed: int) -> None
(2) ``manual_seed_all(seed: int) -> None``
Sets the seed for generating random numbers for your devices.
(3) device_count() -> int:
Returns the number of `foo`s available.
(3) ``device_count() -> int``
Returns the number of "foo"s available.
(4) get_rng_state(device: Union[int, str, torch.device] = 'foo') -> Tensor:
(4) ``get_rng_state(device: Union[int, str, torch.device] = 'foo') -> Tensor``
Returns a list of ByteTensor representing the random number states of all devices.
(5) set_rng_state(new_state: Tensor, device: Union[int, str, torch.device] = 'foo') -> None:
Sets the random number generator state of the specified `foo` device.
(5) ``set_rng_state(new_state: Tensor, device: Union[int, str, torch.device] = 'foo') -> None``
Sets the random number generator state of the specified "foo" device.
And there are some common funcs:
(1) is_available() -> bool:
Returns a bool indicating if `foo` is currently available.
(1) ``is_available() -> bool``
Returns a bool indicating if "foo" is currently available.
(2) ``current_device() -> int``
Returns the index of a currently selected device.
For more details, see https://pytorch.org/tutorials/advanced/extend_dispatcher.html#get-a-dispatch-key-for-your-backend
For an existing example, see https://github.com/bdhirsh/pytorch_open_registration_example
(2) current_device() -> int:
Returns the index of a currently selected device.
Example::
>>> # xdoctest: +SKIP("failing")
@@ -82,6 +84,7 @@ def rename_privateuse1_backend(backend_name: str) -> None:
# This will work, assuming that you've implemented the right C++ kernels
# to implement torch.ones.
>>> a = torch.ones(2, device="foo")
"""
_rename_privateuse1_backend(backend_name)
global _privateuse1_backend_name
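
To make the BackendModule contract described in the docstring above concrete, here is a hedged sketch (not part of the commit) of what such a module could expose for a backend renamed to "foo"; the class name and all bodies are placeholders for calls into a real device runtime:

```python
from typing import List, Union

import torch

class FooBackendModule:
    # Illustrative stand-in for the module passed to torch._register_device_module.
    _autocast_enabled = False
    _autocast_dtype = torch.float16

    # ---- AMP hooks ----
    @staticmethod
    def get_amp_supported_dtype() -> List[torch.dtype]:
        return [torch.float16]

    @classmethod
    def is_autocast_enabled(cls) -> bool:
        return cls._autocast_enabled

    @classmethod
    def get_autocast_dtype(cls) -> torch.dtype:
        return cls._autocast_dtype

    @classmethod
    def set_autocast_enabled(cls, enabled: bool) -> None:
        cls._autocast_enabled = enabled

    @classmethod
    def set_autocast_dtype(cls, dtype: torch.dtype) -> None:
        cls._autocast_dtype = dtype

    # ---- RNG hooks ----
    @staticmethod
    def _is_in_bad_fork() -> bool:
        return False

    @staticmethod
    def manual_seed_all(seed: int) -> None:
        pass  # seed every "foo" device here

    @staticmethod
    def device_count() -> int:
        return 1

    @staticmethod
    def get_rng_state(device: Union[int, str, torch.device] = "foo") -> torch.Tensor:
        return torch.empty(0, dtype=torch.uint8)  # placeholder state

    @staticmethod
    def set_rng_state(new_state: torch.Tensor,
                      device: Union[int, str, torch.device] = "foo") -> None:
        pass  # restore the device RNG from new_state here

    # ---- common helpers ----
    @staticmethod
    def is_available() -> bool:
        return True

    @staticmethod
    def current_device() -> int:
        return 0

# Registration would then look roughly like:
#   torch.utils.rename_privateuse1_backend("foo")
#   torch._register_device_module("foo", FooBackendModule)
```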
@@ -259,13 +262,6 @@ def generate_methods_for_privateuse1_backend(for_tensor: bool = True, for_module
r"""
generate_methods_for_privateuse1_backend(for_tensor, for_module, for_storage, unsupported_dtype) -> None
Args:
for_tensor (bool): whether register related methods for torch.Tensor class.
for_module (bool): whether register related methods for torch.nn.Module class.
for_storage (bool): whether register related methods for torch.Storage class.
unsupported_dtype(List[torch.dtype]): takes effect only when the storage method needs to be generated,
indicating that the storage does not support the torch.dtype type.
Automatically generate attributes and methods for the custom backend after rename privateuse1 backend.
In the default scenario, storage-related methods will not be generated automatically.
@@ -279,6 +275,13 @@ def generate_methods_for_privateuse1_backend(for_tensor: bool = True, for_module
and so will not be properly typed. For Storage methods generate, if you need to support sparse data storage,
you need to extend the implementation yourself.
Args:
for_tensor (bool): whether register related methods for torch.Tensor class.
for_module (bool): whether register related methods for torch.nn.Module class.
for_storage (bool): whether register related methods for torch.Storage class.
unsupported_dtype (List[torch.dtype]): takes effect only when the storage method needs to be generated,
indicating that the storage does not support the torch.dtype type.
Example::
>>> # xdoctest: +SKIP("failing")