Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-07 12:21:27 +01:00)
Summary: This PR creates a new namespace, torch.fft (torch::fft), and puts a single function, fft, in it. This function is a simplified version of NumPy's [numpy.fft.fft](https://numpy.org/doc/1.18/reference/generated/numpy.fft.fft.html?highlight=fft#numpy.fft.fft) that accepts no optional arguments. It is intended to demonstrate how to add and document functions in the namespace, and is not intended to deprecate the existing torch.fft function.

Adding this namespace was complicated by the existence of the torch.fft function in Python. Creating a torch.fft Python module makes this name ambiguous: does it refer to a function or a module? If the JIT didn't exist, a solution to this problem would have been to make torch.fft refer to a callable class that mimicked both the function and the module. The JIT, however, cannot understand this pattern. As a workaround, torch.fft must be imported explicitly to access the torch.fft.fft function in Python:

```
import torch.fft

t = torch.randn(128, dtype=torch.cdouble)
torch.fft.fft(t)
```

See https://github.com/pytorch/pytorch/issues/42175 for future work. Another possible future PR is to get the JIT to understand torch.fft as a callable class so it need not be imported explicitly to be used.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/41911

Reviewed By: glaringlee

Differential Revision: D22941894

Pulled By: mruberry

fbshipit-source-id: c8e0b44cbe90d21e998ca3832cf3a533f28dbe8d
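For context, the callable-class workaround mentioned above (rejected because TorchScript cannot analyze it) would look roughly like the following in eager Python. This is an illustrative sketch only: the module name `demo.fft` and the `__class__`-swapping pattern are assumptions for demonstration, not code from this PR.

```
import sys
import types


class _CallableModule(types.ModuleType):
    """A module object that can also be called like a function."""

    def __call__(self, *args, **kwargs):
        # Forward calls to a designated function defined on the module itself.
        return self.fft(*args, **kwargs)


# Toy stand-in for a torch.fft-like submodule (hypothetical name).
demo = types.ModuleType("demo.fft")
demo.fft = lambda x: x  # placeholder implementation
demo.__class__ = _CallableModule
sys.modules["demo.fft"] = demo

assert demo(3) == 3      # behaves like the legacy function
assert demo.fft(3) == 3  # and like a module attribute
```

This makes the one name resolve both ways in eager mode, but the JIT has no way to model a module object that is also callable, which is why the explicit `import torch.fft` workaround was chosen instead.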
146 lines · 5.0 KiB · Python
import math
import warnings

import torch
import torch.backends.cudnn as cudnn

from torch._six import PY37
from ..nn.modules.utils import _single, _pair, _triple, _quadruple, _list_with_default

from collections import OrderedDict

_builtin_table = None

_modules_containing_builtins = (torch, torch._C._nn, torch._C._fft)

_builtin_ops = [
    # Pairs of (function, op_name)
    (_pair, "aten::_pair"),
    (_quadruple, "aten::_quadruple"),
    (_single, "aten::_single"),
    (_triple, "aten::_triple"),
    (_list_with_default, "aten::list_with_default"),
    (OrderedDict, "aten::dict"),
    (dict, "aten::dict"),
    (cudnn.is_acceptable, "aten::cudnn_is_acceptable"),
    (math.ceil, "aten::ceil"),
    (math.copysign, "aten::copysign"),
    (math.erf, "aten::erf"),
    (math.erfc, "aten::erfc"),
    (math.exp, "aten::exp"),
    (math.expm1, "aten::expm1"),
    (math.fabs, "aten::fabs"),
    (math.floor, "aten::floor"),
    (math.gamma, "aten::gamma"),
    (math.lgamma, "aten::lgamma"),
    (math.log, "aten::log"),
    (math.log10, "aten::log10"),
    (math.log1p, "aten::log1p"),
    (math.pow, "aten::pow"),
    (math.sqrt, "aten::sqrt"),
    (math.isnan, "aten::isnan"),
    (math.asinh, "aten::asinh"),
    (math.atanh, "aten::atanh"),
    (math.cosh, "aten::cosh"),
    (math.sinh, "aten::sinh"),
    (math.tanh, "aten::tanh"),
    (math.acos, "aten::acos"),
    (math.asin, "aten::asin"),
    (math.atan, "aten::atan"),
    (math.atan2, "aten::atan2"),
    (math.cos, "aten::cos"),
    (math.sin, "aten::sin"),
    (math.tan, "aten::tan"),
    (math.asinh, "aten::asinh"),
    (math.atanh, "aten::atanh"),
    (math.acosh, "aten::acosh"),
    (math.sinh, "aten::sinh"),
    (math.cosh, "aten::cosh"),
    (math.tanh, "aten::tanh"),
    (math.fmod, "aten::fmod"),
    (math.modf, "aten::modf"),
    (math.factorial, "aten::factorial"),
    (math.frexp, "aten::frexp"),
    (math.isnan, "aten::isnan"),
    (math.isinf, "aten::isinf"),
    (math.degrees, "aten::degrees"),
    (math.radians, "aten::radians"),
    (math.ldexp, "aten::ldexp"),
    (torch.autograd.grad, "aten::grad"),
    (torch.autograd.backward, "aten::backward"),
    (torch._C._infer_size, "aten::_infer_size"),
    (torch.nn.functional._no_grad_embedding_renorm_, "aten::_no_grad_embedding_renorm_"),
    (torch.nn.functional.assert_int_or_pair, "aten::_assert_int_or_pair"),
    (torch.nn.init._no_grad_fill_, "aten::_no_grad_fill_"),
    (torch.nn.init._no_grad_normal_, "aten::_no_grad_normal_"),
    (torch.nn.init._no_grad_uniform_, "aten::_no_grad_uniform_"),
    (torch.nn.init._no_grad_zero_, "aten::_no_grad_zero_"),
    (torch._C._get_tracing_state, "aten::_get_tracing_state"),
    (warnings.warn, "aten::warn"),
    (torch._VF.stft, "aten::stft"),
    (torch._VF.istft, "aten::istft"),
    (torch._VF.cdist, "aten::cdist"),
    (torch._VF.norm, "aten::norm"),
    (torch._VF.unique_dim, "aten::unique_dim"),
    (torch._VF.unique_consecutive, "aten::unique_consecutive"),
    (torch._VF.nuclear_norm, "aten::nuclear_norm"),
    (torch._VF.frobenius_norm, "aten::frobenius_norm"),
]

# Ops in torch.functional are bound to torch.
# In these cases, we want to resolve the functions to their Python implementations
# instead of looking up a builtin "aten::" schema.

def _gen_torch_functional_registered_ops():
    # Eventually ops should encompass all of torch/functional.py (torch.functional.__all__),
    # but we are currently only able to compile some of the functions. Additionally,
    # some functions directly map to their aten:: implementations.
    # TODO: add support for more ops
    ops = ["stft", "istft", "lu", "lu_unpack", "cdist", "norm", "unique", "unique_consecutive"]
    return set(getattr(torch.functional, name) for name in ops)


_functional_registered_ops = _gen_torch_functional_registered_ops()

def _is_special_functional_bound_op(fn):
    return fn in _functional_registered_ops

# lazily built to ensure the correct initialization order
def _get_builtin_table():
    global _builtin_table
    if _builtin_table is not None:
        return _builtin_table
    _builtin_table = {}

    def register_all(mod):
        for name in dir(mod):
            v = getattr(mod, name)
            if callable(v) and not _is_special_functional_bound_op(v):
                _builtin_ops.append((v, "aten::" + name))

    for mod in _modules_containing_builtins:
        register_all(mod)

    _builtin_ops.append((math.gcd, "aten::gcd"))
    _builtin_ops.append((math.isfinite, "aten::isfinite"))
    if PY37:
        _builtin_ops.append((math.remainder, "aten::mathremainder"))

    import torch.distributed.autograd as dist_autograd
    if dist_autograd.is_available():
        _builtin_ops.append((dist_autograd.get_gradients, "aten::get_gradients"))
        _builtin_ops.append((dist_autograd.backward, "aten::dist_backward"))

    # populate the _builtin_table from _builtin_ops
    for builtin, aten_op in _builtin_ops:
        _builtin_table[id(builtin)] = aten_op

    return _builtin_table

def _register_builtin(fn, op):
    _get_builtin_table()[id(fn)] = op

def _find_builtin(fn):
    return _get_builtin_table().get(id(fn))
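As a usage sketch (not part of the file itself), the table above maps Python callables to "aten::" schema names by object identity, while the torch.functional ops listed in _functional_registered_ops are deliberately excluded so the JIT compiles their Python bodies instead. The import path below assumes this file is torch/jit/_builtins.py; the helper name my_fn is hypothetical.

```
import torch
# Assumes this file lives at torch/jit/_builtins.py; adjust the import if not.
from torch.jit._builtins import _find_builtin, _register_builtin

# Builtins bound on the torch module resolve to an "aten::" schema by identity.
print(_find_builtin(torch.add))              # e.g. "aten::add"

# torch.functional ops such as norm are intentionally absent from the table,
# so the JIT falls back to compiling their Python implementations.
print(_find_builtin(torch.functional.norm))  # None

# New builtins can be registered explicitly.
def my_fn(x):
    return x

_register_builtin(my_fn, "aten::my_fn")
assert _find_builtin(my_fn) == "aten::my_fn"
```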