pytorch/tools/autograd/templates/python_fft_functions.cpp
Yu, Guangye 5c46600f84 [RELAND] refactor lazy init to device-agnostic (#119248)
# Motivation
This PR extends `cuda_lazy_init` to `device_lazy_init`, a device-agnostic API that can support any backend, and changes `maybe_initialize_cuda` to `maybe_initialize_device`, which keeps lazy initialization working for CUDA while remaining extensible to other backends.

# Design
We maintain a flag for each backend to manage the lazy initialization state separately.
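To make the idea concrete, here is a minimal sketch of per-backend flag tracking, assuming the c10 device headers. It is illustrative only, not the actual implementation; the flag table name `is_initialized` is hypothetical, while `device_lazy_init` and `maybe_initialize_device` are the API names this PR introduces.

```cpp
// Minimal sketch of per-backend lazy-init state; not the real implementation.
#include <array>
#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>

namespace torch::utils {

// One flag per device type, so each backend tracks its own state.
// (Illustrative name; the real flag storage may differ.)
static std::array<bool, c10::COMPILE_TIME_MAX_DEVICE_TYPES> is_initialized{};

// Device-agnostic counterpart of the old cuda_lazy_init.
inline void device_lazy_init(c10::DeviceType device_type) {
  auto idx = static_cast<size_t>(device_type);
  if (is_initialized[idx]) {
    return;  // this backend is already initialized
  }
  // ... invoke the backend's lazy initialization hook here ...
  is_initialized[idx] = true;
}

// Device-agnostic counterpart of the old maybe_initialize_cuda.
inline void maybe_initialize_device(const c10::Device& device) {
  device_lazy_init(device.type());
}

} // namespace torch::utils
```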

# Additional Context
No additional UTs are needed.
This is a reland PR; the original PR is [refactor lazy init to device-agnostic](https://github.com/pytorch/pytorch/pull/118846).
This PR only touches common (backend-agnostic) code, so it does not trigger the xpu ciflow.

Differential Revision: [D53478332](https://our.internmc.facebook.com/intern/diff/D53478332)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/119248
Approved by: https://github.com/EikanWang, https://github.com/gujinghui, https://github.com/jgong5, https://github.com/atalman
2024-02-07 15:58:51 +00:00

#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}
#include "torch/csrc/Device.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/autograd/python_fft_functions.h"
#include "torch/csrc/autograd/generated/python_return_types.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/out_types.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/utils/device_lazy_init.h"
#include <ATen/core/Tensor.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif
using at::Tensor;
using at::Device;
using at::Layout;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;
using at::IntArrayRef;
using at::Generator;
using at::TensorList;
using at::Dimname;
using at::DimnameList;
using torch::utils::check_out_type_matches;
using namespace torch::autograd::utils;
namespace torch::autograd {
// generated forward declarations start here
${py_forwards}
static PyMethodDef fft_functions[] = {
  ${py_method_defs}
  {NULL}
};
static PyObject* THPFFTVariableFunctionsModule = NULL;
// Creates the torch._C._fft module, registers the generated FFT bindings as
// its methods, and attaches it to the given parent module as "_fft".
void initFFTFunctions(PyObject* module) {
  static struct PyModuleDef def = {
      PyModuleDef_HEAD_INIT,
      "torch._C._fft",
      NULL,
      -1,
      fft_functions
  };
  PyObject* fft = PyModule_Create(&def);
  THPFFTVariableFunctionsModule = fft;
  if (!fft) {
    throw python_error();
  }
  // steals a reference to fft
  if (PyModule_AddObject(module, "_fft", fft) != 0) {
    throw python_error();
  }
}
// generated methods start here
${py_methods}
} // namespace torch::autograd