Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/68693

Generation of python bindings for native functions is split over 8 different files: one for each namespace, with the torch namespace split into 3 shards, and methods in their own file as well. This change ensures that editing any single (non-method) operator only causes one of these files to be rebuilt.

Test Plan: Imported from OSS

Reviewed By: jbschlosser

Differential Revision: D32596270

Pulled By: albanD

fbshipit-source-id: 0570ec69e7476b8f1bc21138ba18fe8f95ebbe3f
82 lines
1.9 KiB
C++
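The per-file rebuild behavior described in the commit message hinges on the `#ifndef AT_PER_OPERATOR_HEADERS` block in the template below: when per-operator headers are enabled, the `$ops_headers` placeholder is filled with one narrow include per operator used by this shard instead of the monolithic `ATen/Functions.h`, so editing a single operator only invalidates the shards that include its header. The sketch below is illustrative only; the concrete operator names and the actual include list are produced by the codegen.

```cpp
// Hedged sketch of how the two include paths differ; the real list is
// generated per shard and will not match this exactly.
#ifndef AT_PER_OPERATOR_HEADERS
// Monolithic header: touching any operator forces this shard to rebuild.
#include <ATen/Functions.h>
#else
// Per-operator headers: only shards using the edited operator rebuild.
#include <ATen/ops/fft_fft.h>   // illustrative
#include <ATen/ops/fft_ifft.h>  // illustrative
#endif
```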
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}

#include "torch/csrc/Device.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/autograd/python_fft_functions.h"
#include "torch/csrc/autograd/python_return_types.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/out_types.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/utils/cuda_lazy_init.h"

#include <ATen/core/Tensor.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif

using at::Tensor;
using at::Device;
using at::Layout;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;
using at::IntArrayRef;
using at::Generator;
using at::TensorList;
using at::Dimname;
using at::DimnameList;

using torch::utils::check_out_type_matches;
using namespace torch::autograd::utils;

namespace torch { namespace autograd {

// generated forward declarations start here

${py_forwards}

static PyMethodDef fft_functions[] = {
  ${py_method_defs}
  {NULL}
};

static PyObject* THPFFTVariableFunctionsModule = NULL;

void initFFTFunctions(PyObject* module) {
  static struct PyModuleDef def = {
    PyModuleDef_HEAD_INIT,
    "torch._C._fft",
    NULL,
    -1,
    fft_functions
  };
  PyObject* fft = PyModule_Create(&def);
  THPFFTVariableFunctionsModule = fft;
  if (!fft) {
    throw python_error();
  }
  // steals a reference to fft
  if (PyModule_AddObject(module, "_fft", fft) != 0) {
    throw python_error();
  }
}

// generated methods start here

${py_methods}

}} // namespace torch::autograd
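As a rough illustration of what the codegen substitutes for the remaining placeholders, a single operator such as `torch.fft.fft` contributes a forward declaration (`${py_forwards}`), a method-table entry (`${py_method_defs}`), and a binding definition (`${py_methods}`). The sketch below follows the general pattern of the generated bindings but is not verbatim codegen output; the name `THPVariable_fft_fft` and the exact `METH_*` flags are illustrative assumptions.

```cpp
// Hedged sketch of placeholder expansions for one operator (illustrative).

// ${py_forwards}: forward declaration of the binding function.
static PyObject* THPVariable_fft_fft(PyObject* self_, PyObject* args, PyObject* kwargs);

// ${py_method_defs}: the matching entry in the fft_functions table above.
{"fft_fft", castPyCFunctionWithKeywords(THPVariable_fft_fft),
 METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},

// ${py_methods}: the definition of THPVariable_fft_fft, which parses the
// Python arguments and dispatches to the corresponding ATen call.
```

Registering each binding as a static function on the `torch._C._fft` module (created in `initFFTFunctions`) is what makes the operators reachable from Python via `torch.fft.*`.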