#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}

// Python bindings for torch.* functions implemented through ATen.
//
// The functions are bound as static methods on a class
// torch._C._VariableFunctions which is also aliased as Variable._torch
// and also copied into 'torch' module.

#include <Python.h>

// Undefine the copysign macro so that at::copysign works as intended with
// MSVC
// https://github.com/python/cpython/blob/c60394c7fc9cc09b16e9675a3eeb5844b6d8523f/PC/pyconfig.h#L196
#ifdef _MSC_VER
#undef copysign
#endif // _MSC_VER

#include "torch/csrc/autograd/python_torch_functions.h"

#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/Dtype.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/utils/out_types.h"
#include "torch/csrc/utils/pybind.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/tensor_layouts.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/jit/frontend/tracer.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/utils/cuda_lazy_init.h"
#include "torch/csrc/autograd/python_return_types.h"

#include <ATen/core/Tensor.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif

#include <functional>
#include <initializer_list>
#include <stdexcept>
#include <utility>

using at::Tensor;
using at::Device;
using at::Layout;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;
using at::IntArrayRef;
using at::Generator;
using at::TensorList;
using at::Dimname;
using at::DimnameList;
using at::ArrayRef;

using torch::utils::check_out_type_matches;
using namespace torch::autograd::utils;

// NOTE: See [Sharded File] comment in VariableType

namespace torch { namespace autograd {

// generated forward declarations start here

${py_forwards}

static PyMethodDef torch_functions_shard[] = {
  ${py_method_defs}
};

// Appends this shard's generated PyMethodDef entries to the aggregate table
// from which torch._C._VariableFunctions is built.
void gatherTorchFunctions${shard_id}(std::vector<PyMethodDef>& torch_functions) {
  constexpr size_t num_functions = sizeof(torch_functions_shard) / sizeof(torch_functions_shard[0]);
  torch_functions.insert(
    torch_functions.end(),
    torch_functions_shard,
    torch_functions_shard + num_functions);
}

// generated methods start here

${py_methods}

}} // namespace torch::autograd
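
// ---------------------------------------------------------------------------
// Illustrative sketch (kept as a comment so it does not end up in the
// generated translation unit): how the per-shard gatherers above are expected
// to be combined when the Python bindings are initialized. The aggregation
// itself lives in hand-written code outside this template; apart from the
// gatherTorchFunctions_<N> gatherers and the PyMethodDef/std::vector types,
// the names below are assumptions for illustration only.
//
//   std::vector<PyMethodDef> torch_functions;
//   gatherTorchFunctions_0(torch_functions);  // one call per generated shard
//   gatherTorchFunctions_1(torch_functions);
//   gatherTorchFunctions_2(torch_functions);
//   torch_functions.push_back({nullptr, nullptr, 0, nullptr});  // null-terminate the table
//   THPVariableFunctions.tp_methods = torch_functions.data();
//
// Each generated ${py_method_defs} entry has the usual CPython PyMethodDef
// shape, e.g. (hypothetical operator name):
//
//   {"add", castPyCFunctionWithKeywords(THPVariable_add),
//    METH_VARARGS | METH_KEYWORDS | METH_STATIC, nullptr},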