pytorch/torch/csrc/utils/tensor_layouts.cpp
Dylan Bespalko 849c32f8e9 Cpu-strided-complex support for binary-ops (#25534)
Summary:
In-tree changes to PyTorch to support complex numbers are being submitted here.
Out-of-tree support for complex numbers is here: [pytorch-cpu-strided-complex extension](https://gitlab.com/pytorch-complex/pytorch-cpu-strided-complex)

Note: These changes do not support AVX/SSE operations on complex tensors.
Changes so far:

- [x]  Added complex support for torch.empty.
- [x]  Added complex support for the copy kernels.
- [x]  Added complex support for the binary-op kernels.

Once these changes are applied, the rest of the kernels are pretty easy (see the usage sketch below).
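As a rough illustration of what the three items above enable (a hedged sketch, not part of this PR; it assumes a build containing these in-tree changes, and the dtype name `torch.complex64` as exposed by that build):

```python
import torch

# Sketch only: exercises the three pieces added by this PR on CPU.
a = torch.empty(3, dtype=torch.complex64)  # complex support for torch.empty
b = torch.empty(3, dtype=torch.complex64)

b.copy_(a)   # exercises the complex copy kernels
c = a + b    # exercises the complex binary-op kernels (CPU, no AVX/SSE)

print(c.dtype)  # torch.complex64 (values are uninitialized memory here)
```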

ezyang
I have fixed the issues in the original [PR: 25373](https://github.com/pytorch/pytorch/pull/25373).
Pull Request resolved: https://github.com/pytorch/pytorch/pull/25534

Differential Revision: D17188390

Pulled By: ezyang

fbshipit-source-id: ade9fb00b2caa89b0f66a4de70a662b62db13a8c
2019-09-04 13:20:52 -07:00


#include <torch/csrc/utils/tensor_layouts.h>
#include <ATen/Layout.h>
#include <c10/core/ScalarType.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/Layout.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/object_ptr.h>
namespace torch { namespace utils {

void initializeLayouts() {
  auto torch_module = THPObjectPtr(PyImport_ImportModule("torch"));
  if (!torch_module) throw python_error();

  PyObject* strided_layout =
      THPLayout_New(at::Layout::Strided, "torch.strided");
  Py_INCREF(strided_layout);
  if (PyModule_AddObject(torch_module, "strided", strided_layout) != 0) {
    throw python_error();
  }
  // for now, let's look these up by Backend; we could create our own enum in the future.
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::CPU);
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::CUDA);
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::MSNPU);
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::XLA);
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::QuantizedCPU);

  PyObject* sparse_coo_layout =
      THPLayout_New(at::Layout::Sparse, "torch.sparse_coo");
  Py_INCREF(sparse_coo_layout);
  if (PyModule_AddObject(torch_module, "sparse_coo", sparse_coo_layout) != 0) {
    throw python_error();
  }
  registerLayoutObject((THPLayout*)sparse_coo_layout, at::Backend::SparseCPU);
  registerLayoutObject((THPLayout*)sparse_coo_layout, at::Backend::SparseCUDA);

  PyObject* mkldnn_layout = THPLayout_New(at::Layout::Mkldnn, "torch._mkldnn");
  Py_INCREF(mkldnn_layout);
  if (PyModule_AddObject(torch_module, "_mkldnn", mkldnn_layout) != 0) {
    throw python_error();
  }
  registerLayoutObject((THPLayout*)mkldnn_layout, at::Backend::MkldnnCPU);

  // Complex tensors are strided, so the complex backends reuse the strided
  // layout object (registered as part of this PR's strided-complex support).
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::ComplexCPU);
  registerLayoutObject((THPLayout*)strided_layout, at::Backend::ComplexCUDA);
}

}} // namespace torch::utils
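For context (not part of the file above), a minimal Python-side sketch of how the layout objects registered by `initializeLayouts()` surface to users; it assumes a standard build where `torch.strided` and `torch.sparse_coo` are exposed via `PyModule_AddObject` as shown, plus this PR's complex changes:

```python
import torch

# torch.strided and torch.sparse_coo are the module attributes added above.
dense = torch.zeros(2, 3)
print(dense.layout)                     # torch.strided

sparse = torch.zeros(2, 3).to_sparse()  # sparse COO tensor
print(sparse.layout)                    # torch.sparse_coo

# Per this PR, the ComplexCPU/ComplexCUDA backends map to the same
# strided layout object, so complex tensors report torch.strided.
z = torch.empty(2, dtype=torch.complex64)
print(z.layout)                         # torch.strided
```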