mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 12:21:27 +01:00
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/37081
Closes https://github.com/pytorch/pytorch/issues/30813
Relanding of https://github.com/pytorch/pytorch/pull/35463

1. Tensor quantization logic (quantize_*) is moved to aten/native/quantized. Previously all of the tensor quantization logic lived in aten/quantized/Quantizer.cpp, which had become complicated and hard to read; a proper cleanup is left to a refactoring PR. Still, it is partially reworked here because tensor quantization logic had to be added for CUDA, and it was natural to move everything to aten/native/quantized.
2. The requirements for running CUDA_tensor_apply* are relaxed so it can process any tensor that lives on a CUDA device (QuantizedCUDA included).
3. All quantized data types now have a default constructor; NVCC refuses to compile any gpu_kernel or CUDA_tensor_apply* call without them.
4. Minor changes in many files to register the QuantizedCUDA backend.
5. test_quantized_tensor is extended to cover the QuantizedCUDA backend where possible.

Test Plan: Imported from OSS

Differential Revision: D21206694

Pulled By: jerryzh168

fbshipit-source-id: c7433aad9c095a34c57e6dddd128b5c5d9292373
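In practice, the user-visible effect of the QuantizedCUDA backend is that per-tensor quantization now works on tensors living on a CUDA device. A minimal sketch of that path (assuming a CUDA-enabled build; the shapes and quantization parameters below are arbitrary, not taken from the PR):

import torch

# Quantize a float tensor that lives on the GPU; with this PR the call
# dispatches to the QuantizedCUDA backend instead of failing.
x = torch.rand(4, 8, device="cuda")
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
assert qx.is_cuda and qx.is_quantized

# Round-trip back to a regular float tensor on the same device.
x_hat = qx.dequantize()
assert x_hat.device == x.device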
46 lines · 1.9 KiB · C++
#include <torch/csrc/utils/tensor_layouts.h>

#include <ATen/Layout.h>
#include <c10/core/ScalarType.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/Layout.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/object_ptr.h>

namespace torch { namespace utils {

void initializeLayouts() {
  auto torch_module = THPObjectPtr(PyImport_ImportModule("torch"));
  if (!torch_module) throw python_error();

  PyObject *strided_layout = THPLayout_New(at::Layout::Strided, "torch.strided");
  Py_INCREF(strided_layout);
  if (PyModule_AddObject(torch_module, "strided", strided_layout) != 0) {
    throw python_error();
  }
  // for now, let's look these up by Backend; we could create our own enum in the future.
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::CPU);
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::CUDA);
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::MSNPU);
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::XLA);
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::QuantizedCPU);
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::QuantizedCUDA);

  PyObject *sparse_coo_layout = THPLayout_New(at::Layout::Sparse, "torch.sparse_coo");
  Py_INCREF(sparse_coo_layout);
  if (PyModule_AddObject(torch_module, "sparse_coo", sparse_coo_layout) != 0) {
    throw python_error();
  }
  registerLayoutObject((THPLayout*)sparse_coo_layout, at::Backend::SparseCPU);
  registerLayoutObject((THPLayout*)sparse_coo_layout, at::Backend::SparseCUDA);

  PyObject *mkldnn_layout = THPLayout_New(at::Layout::Mkldnn, "torch._mkldnn");
  Py_INCREF(mkldnn_layout);
  if (PyModule_AddObject(torch_module, "_mkldnn", mkldnn_layout) != 0) {
    throw python_error();
  }
  registerLayoutObject((THPLayout*)mkldnn_layout, at::Backend::MkldnnCPU);
}

}} // namespace torch::utils
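For reference, a quick Python-level check of what initializeLayouts() exposes: torch.strided and torch.sparse_coo become module attributes, and the per-backend registrations determine which layout object a tensor reports. A minimal sketch (assuming a standard PyTorch build; the quantized check exercises the QuantizedCPU/QuantizedCUDA entries added in this PR):

import torch

# Layout objects added to the torch module by initializeLayouts().
assert isinstance(torch.strided, torch.layout)
assert torch.rand(3).layout is torch.strided
assert torch.eye(3).to_sparse().layout is torch.sparse_coo

# Quantized tensors resolve to torch.strided via the quantized-backend registrations.
q = torch.quantize_per_tensor(torch.rand(3), scale=0.1, zero_point=0, dtype=torch.quint8)
assert q.layout is torch.strided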