From 705d80b51e66d765da401e86244d91c4e130a482 Mon Sep 17 00:00:00 2001
From: Gregory Chanan
Date: Fri, 5 Oct 2018 11:05:43 -0700
Subject: [PATCH] Remove some Type.tensor usages and remove native_tensor
 without size. (#12355)

Summary:
This is to move us along the path to removing Type from the public API.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/12355

Reviewed By: ezyang

Differential Revision: D10212616

Pulled By: gchanan

fbshipit-source-id: c9cd128d1111ab219cb0b2f3bf5b632502ab97c0
---
 aten/src/ATen/SparseTensorImpl.cpp           | 10 ++++----
 aten/src/ATen/native/LegacyBridge.cpp        |  2 +-
 aten/src/ATen/native/TensorFactories.cpp     | 24 +++++++++----------
 aten/src/ATen/native/cuda/TensorFactories.cu |  2 +-
 aten/src/ATen/native/native_functions.yaml   |  7 ------
 aten/src/ATen/native/sparse/SparseTensor.cpp |  2 +-
 .../ATen/native/sparse/SparseTensorMath.cpp  |  4 ++--
 aten/src/ATen/native/sparse/SparseUtils.h    |  2 +-
 aten/src/ATen/templates/TypeDefault.cpp      |  6 ++---
 aten/src/ATen/test/apply_utils_test.cpp      |  2 +-
 aten/src/ATen/test/atest.cpp                 |  2 +-
 torch/csrc/jit/import.cpp                    |  2 +-
 torch/csrc/jit/test_jit.cpp                  |  2 +-
 torch/csrc/utils/tensor_new.cpp              |  2 +-
 14 files changed, 31 insertions(+), 38 deletions(-)

diff --git a/aten/src/ATen/SparseTensorImpl.cpp b/aten/src/ATen/SparseTensorImpl.cpp
index 66b71dd7b8a..4bbec221474 100644
--- a/aten/src/ATen/SparseTensorImpl.cpp
+++ b/aten/src/ATen/SparseTensorImpl.cpp
@@ -4,11 +4,11 @@
 namespace at {
 
 namespace {
-  Backend sparseTensorIdToDenseBackend(TensorTypeId type_id) {
+  DeviceType sparseTensorIdToDeviceType(TensorTypeId type_id) {
     if (type_id == SparseCPUTensorId()) {
-      return Backend::CPU;
+      return kCPU;
     } else if (type_id == SparseCUDATensorId()) {
-      return Backend::CUDA;
+      return kCUDA;
     } else {
       AT_ERROR("Cannot construct SparseTensor with non-sparse tensor type ID ", type_id);
     }
@@ -33,8 +33,8 @@ SparseTensorImpl::SparseTensorImpl(at::TensorTypeId type_id, const caffe2::TypeM
     , size_{0}
     , sparseDims_(1)
     , denseDims_(0)
-    , indices_(globalContext().getNonVariableTypeOpt(sparseTensorIdToDenseBackend(type_id), ScalarType::Long)->tensor({1, 0}))
-    , values_(globalContext().getNonVariableTypeOpt(sparseTensorIdToDenseBackend(type_id), dataTypeToScalarType(data_type.id()))->tensor()) {}
+    , indices_(at::empty({1, 0}, TensorOptions(false).device(sparseTensorIdToDeviceType(type_id)).dtype(ScalarType::Long)))
+    , values_(at::empty({0}, TensorOptions(false).device(sparseTensorIdToDeviceType(type_id)).dtype(dataTypeToScalarType(data_type.id())))) {}
 
 IntList SparseTensorImpl::sizes() const {
   return size_;
diff --git a/aten/src/ATen/native/LegacyBridge.cpp b/aten/src/ATen/native/LegacyBridge.cpp
index 5fc554410ac..e207ac32637 100644
--- a/aten/src/ATen/native/LegacyBridge.cpp
+++ b/aten/src/ATen/native/LegacyBridge.cpp
@@ -136,7 +136,7 @@ Tensor& addmm_(Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta
 
 Tensor tensor(const Type& dtype) {
   if (_type_has_native(dtype)) {
-    return at::getType(dtype.options()).native_tensor();
+    return at::getType(dtype.options()).native_tensor({0});
   } else {
     return at::getType(dtype.options()).th_tensor();
   }
diff --git a/aten/src/ATen/native/TensorFactories.cpp b/aten/src/ATen/native/TensorFactories.cpp
index 2e37acc951a..5e6c1aa25fa 100644
--- a/aten/src/ATen/native/TensorFactories.cpp
+++ b/aten/src/ATen/native/TensorFactories.cpp
@@ -147,11 +147,11 @@ Tensor empty_like(const Tensor& self) {
 
 Tensor empty_like(const Tensor& self, const TensorOptions& options) {
   if (options.layout() == kSparse && self.type().is_sparse()) {
-    auto res = native::empty({0}, options); // to be resized
+    auto res = at::empty({0}, options); // to be resized
     res.sparse_resize_and_clear_(self.sizes(), self._sparseDims(), self._denseDims());
     return res;
   }
-  return native::empty(self.sizes(), options);
+  return at::empty(self.sizes(), options);
 }
 
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eye ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -161,7 +161,7 @@ Tensor eye(int64_t n, const TensorOptions& options) {
 }
 
 Tensor eye(int64_t n, int64_t m, const TensorOptions& options) {
-  auto tensor = native::empty({0}, options); // to be resized
+  auto tensor = at::empty({0}, options); // to be resized
   return at::eye_out(tensor, n, m);
 }
 
@@ -196,7 +196,7 @@ Tensor full(IntList size, Scalar fill_value, const TensorOptions& options) {
   if (options.layout() == kSparse) {
     AT_ERROR("full(...) is not implemented for sparse layout");
   }
-  auto result = native::empty(size, options);
+  auto result = at::empty(size, options);
   return result.fill_(fill_value);
 }
 
@@ -287,7 +287,7 @@ Tensor rand(IntList size, const TensorOptions& options) {
 }
 
 Tensor rand(IntList size, Generator* generator, const TensorOptions& options) {
-  auto result = native::empty(size, options);
+  auto result = at::empty(size, options);
   return result.uniform_(0, 1, generator);
 }
 
@@ -336,7 +336,7 @@ Tensor randint(
     IntList size,
     Generator* generator,
     const TensorOptions& options) {
-  auto result = native::empty(size, options);
+  auto result = at::empty(size, options);
   return result.random_(low, high, generator);
 }
 
@@ -397,7 +397,7 @@ Tensor randn(IntList size, const TensorOptions& options) {
 }
 
 Tensor randn(IntList size, Generator* generator, const TensorOptions& options) {
-  auto result = native::empty(size, options);
+  auto result = at::empty(size, options);
   return result.normal_(0, 1, generator);
 }
 
@@ -454,7 +454,7 @@ Tensor randperm(int64_t n, const TensorOptions& options) {
 }
 
 Tensor randperm(int64_t n, Generator* generator, const TensorOptions& options) {
-  auto tensor = native::empty(n, options);
+  auto tensor = at::empty(n, options);
   return at::randperm_out(tensor, n, generator);
 }
 
@@ -499,7 +499,7 @@ Tensor& range_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
 
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ zeros ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Tensor zeros(IntList size, const TensorOptions& options) {
-  auto result = native::empty(size, options);
+  auto result = at::empty(size, options);
   return result.zero_();
 }
@@ -519,7 +519,7 @@ Tensor zeros_like(const Tensor& self) {
 
 Tensor zeros_like(const Tensor& self, const TensorOptions& options) {
   if (options.layout() == kSparse && self.type().is_sparse()) {
-    auto res = native::empty({0}, options); // to be resized
+    auto res = at::empty({0}, options); // to be resized
     res.sparse_resize_and_clear_(self.sizes(), self._sparseDims(), self._denseDims());
     return res;
   }
@@ -538,7 +538,7 @@ Tensor bartlett_window(
     const TensorOptions& options) {
   window_function_checks("bartlett_window", options, window_length);
   if (window_length == 0) {
-    return native::empty({0}, options);
+    return at::empty({0}, options);
   }
   if (window_length == 1) {
     return native::ones({1}, options);
@@ -606,7 +606,7 @@ Tensor hamming_window(
     const TensorOptions& options) {
   window_function_checks("hamming_window", options, window_length);
   if (window_length == 0) {
-    return native::empty({0}, options);
+    return at::empty({0}, options);
   }
   if (window_length == 1) {
     return native::ones({1}, options);
diff --git a/aten/src/ATen/native/cuda/TensorFactories.cu b/aten/src/ATen/native/cuda/TensorFactories.cu
index cbddd0ae87a..774e0cc5d90 100644
--- a/aten/src/ATen/native/cuda/TensorFactories.cu
+++ b/aten/src/ATen/native/cuda/TensorFactories.cu
@@ -45,7 +45,7 @@ Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
   result.resize_({n});
 
   if (result.type().scalarType() == at::ScalarType::Half) {
-    auto result_float = CUDA(kFloat).tensor({n});
+    auto result_float = at::empty({n}, TensorOptions(false).device(Device(DeviceType::CUDA)));
     result.copy_(randperm_out_cuda(result_float, n, generator));
   } else {
     if (n < 30000) {  // For small inputs, we offload it to CPU instead.
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index b4ebdfb634e..082ca4999a0 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -1895,13 +1895,6 @@
 - func: addmm_(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
   variants: method
-
-- func: native_tensor(Type self_ty) -> Tensor
-  variants: []
-  dispatch:
-    SparseCPU: new_sparse
-    SparseCUDA: new_sparse
-
 - func: native_tensor(Type self_ty, IntList size) -> Tensor
   variants: []
   dispatch:
diff --git a/aten/src/ATen/native/sparse/SparseTensor.cpp b/aten/src/ATen/native/sparse/SparseTensor.cpp
index f8c73ad5ca1..41e0918b0a4 100644
--- a/aten/src/ATen/native/sparse/SparseTensor.cpp
+++ b/aten/src/ATen/native/sparse/SparseTensor.cpp
@@ -99,7 +99,7 @@ SparseTensor new_with_tensor_sparse(const LongTensor& indices, const Tensor& val
     computed_indices_sizes.add_(1); // len = max_index + 1
     LongTensor cpu_computed_indices_sizes;
     if (computed_indices_sizes.is_cuda()) {
-      cpu_computed_indices_sizes = at::CPU(kLong).tensor(computed_indices_sizes.sizes());
+      cpu_computed_indices_sizes = at::empty(computed_indices_sizes.sizes(), TensorOptions(false).dtype(kLong));
       cpu_computed_indices_sizes.copy_(computed_indices_sizes);
     } else {
       cpu_computed_indices_sizes = computed_indices_sizes;
diff --git a/aten/src/ATen/native/sparse/SparseTensorMath.cpp b/aten/src/ATen/native/sparse/SparseTensorMath.cpp
index c71e3845097..39e090aa2bd 100644
--- a/aten/src/ATen/native/sparse/SparseTensorMath.cpp
+++ b/aten/src/ATen/native/sparse/SparseTensorMath.cpp
@@ -623,7 +623,7 @@ SparseTensor& hspmm_out_sparse_cpu(SparseTensor& r, const SparseTensor& sparse_,
     return r;
   }
 
-  LongTensor indices = at::CPU(kLong).tensor({1, nnz});
+  LongTensor indices = at::empty({1, nnz}, TensorOptions(false).dtype(kLong));
 
   // Initialize the sparse matrix that will be used with spaddmm to send rows
   // from the dense matrix to rows of the output's value tensor
@@ -715,7 +715,7 @@ SparseTensor& _sspaddmm_out_cpu(
 
   int64_t t_nnz = t._nnz();
   int64_t r_nnz = nnz * dim_k + t_nnz;
-  LongTensor newi = native::empty({2, r_nnz}, kLong);
+  LongTensor newi = at::empty({2, r_nnz}, kLong);
   LongTensor newv = native::zeros({r_nnz}, values.options());
 
   if (t_nnz != 0) {
diff --git a/aten/src/ATen/native/sparse/SparseUtils.h b/aten/src/ATen/native/sparse/SparseUtils.h
index a0fbf4ea904..2e48fe41238 100644
--- a/aten/src/ATen/native/sparse/SparseUtils.h
+++ b/aten/src/ATen/native/sparse/SparseUtils.h
@@ -62,7 +62,7 @@ inline SparseTensor _new_with_dims_and_tensor_sparse(
     ArrayRef<int64_t> sizes,
     const LongTensor& indices,
     const Tensor& values) {
-  SparseTensor self = new_sparse(dtype);
+  SparseTensor self = at::empty({0}, dtype.options());
   _get_sparse_impl(self)->resize_(sparseDims, denseDims, sizes);
   _alias_into_sparse(self, indices, values);
   return self;
 }
diff --git a/aten/src/ATen/templates/TypeDefault.cpp b/aten/src/ATen/templates/TypeDefault.cpp
index 03309f8fe9e..71c50d5b3df 100644
--- a/aten/src/ATen/templates/TypeDefault.cpp
+++ b/aten/src/ATen/templates/TypeDefault.cpp
@@ -30,8 +30,8 @@ Tensor TypeDefault::copy(const Tensor & src, bool non_blocking, optional<Device>
   }
   AT_CHECK(src.defined(), "attempt to copy an undefined tensor");
   Tensor r;
-  if (is_sparse()) r = this->native_tensor();
-  else r = this->tensor(src.sizes());
+  if (is_sparse()) r = this->native_tensor({0});
+  else r = at::empty(src.sizes(), this->options());
   r.copy_(src, non_blocking);
   return r;
 }
@@ -118,7 +118,7 @@ Storage TypeDefault::unsafeStorageFromTH(void * th_pointer, bool retain) const {
 }
 
 Tensor TypeDefault::scalarTensor(Scalar s) const {
-  return tensor({}).fill_(s);
+  return at::empty({}, this->options()).fill_(s);
 }
 
 ${type_method_definitions}
diff --git a/aten/src/ATen/test/apply_utils_test.cpp b/aten/src/ATen/test/apply_utils_test.cpp
index 71715a2d4b0..96901a0babc 100644
--- a/aten/src/ATen/test/apply_utils_test.cpp
+++ b/aten/src/ATen/test/apply_utils_test.cpp
@@ -40,7 +40,7 @@ void test(Type& type, IntList shape, int64_t a = 0, int64_t b = 1) {
   auto a1 = at::empty({0}, type.options());
   auto a2 = at::empty({0}, type.options());
   auto a3 = at::empty({0}, type.options());
-  auto a4 = CPU(kDouble).tensor();
+  auto a4 = at::empty({0}, at::TensorOptions(false).dtype(kDouble));
 
   std::vector<Tensor> tensors({a0, a1, a2, a3, a4});
   for (size_t i = 0; i < tensors.size(); i++) {
diff --git a/aten/src/ATen/test/atest.cpp b/aten/src/ATen/test/atest.cpp
index 96c5ed11897..2110ec2cbc6 100644
--- a/aten/src/ATen/test/atest.cpp
+++ b/aten/src/ATen/test/atest.cpp
@@ -94,7 +94,7 @@ TEST(atest, atest) {
   if (at::hasCUDA()) {
     int isgone = 0;
     {
-      auto base = CUDA(kFloat).tensor({1, 2, 3});
+      auto base = at::empty({1,2,3}, TensorOptions(false).device(kCUDA));
       auto f2 = CUDA(kFloat).tensorFromBlob(
           base.data_ptr(), {1, 2, 3}, [&](void*) { isgone++; });
     }
diff --git a/torch/csrc/jit/import.cpp b/torch/csrc/jit/import.cpp
index 64985bb02a7..36cf36bc162 100644
--- a/torch/csrc/jit/import.cpp
+++ b/torch/csrc/jit/import.cpp
@@ -64,7 +64,7 @@ at::ScalarType DecoderBase::onnxTypeToATenType(onnx::TensorProto_DataType onnx_t
 }
 
 at::Tensor DecoderBase::buildTensor(const onnx::TensorProto& tensor_proto) {
-  at::Tensor tensor = at::CPU(onnxTypeToATenType(tensor_proto.data_type())).tensor();
+  at::Tensor tensor = at::empty({0}, at::TensorOptions(false).dtype(onnxTypeToATenType(tensor_proto.data_type())));
   std::vector<int64_t> sizes = { tensor_proto.dims().begin(),
                                  tensor_proto.dims().end() };
   tensor.resize_(sizes);
diff --git a/torch/csrc/jit/test_jit.cpp b/torch/csrc/jit/test_jit.cpp
index 9942437c9eb..f4de05273b9 100644
--- a/torch/csrc/jit/test_jit.cpp
+++ b/torch/csrc/jit/test_jit.cpp
@@ -521,7 +521,7 @@ struct ADTestSpec {
   std::vector<Variable> make_vars() const {
     std::vector<Variable> out;
     for (const auto & m : input_meta) {
-      out.emplace_back(autograd::make_variable(at::CPU(at::kFloat).tensor(m).normal_(), /*requires_grad=*/true));
+      out.emplace_back(autograd::make_variable(at::empty(m, at::TensorOptions(false)).normal_(), /*requires_grad=*/true));
     }
     return out;
   }
diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp
index 4c6a2855ea2..abbfa1a5f91 100644
--- a/torch/csrc/utils/tensor_new.cpp
+++ b/torch/csrc/utils/tensor_new.cpp
@@ -231,7 +231,7 @@ Tensor internal_new_from_data(const Type & type, at::optional<Device> device_opt
   auto sizes = compute_sizes(data);
   ScalarType scalarType = type_inference ?
      infer_scalar_type(data) : type.scalarType();
-  auto tensor = autograd::make_variable(CPU(scalarType).tensor(sizes), /*requires_grad=*/false);
+  auto tensor = autograd::make_variable(at::empty(sizes, at::TensorOptions(false).dtype(scalarType)), /*requires_grad=*/false);
   recursive_store(
       (char*)tensor.data_ptr(), tensor.sizes(), tensor.strides(), 0,
       scalarType, tensor.type().elementSizeInBytes(), data);
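
Reviewer note (appended below the patch, not part of the commit): every hunk
above applies the same mechanical rewrite, replacing Type-mediated
construction with a direct factory call that carries a TensorOptions. The
sketch below contrasts the two forms. It is a minimal illustration assuming
the 2018-era ATen API exactly as it appears in the hunks above, including the
TensorOptions(false) constructor the patch uses throughout; the function name
migration_example and the concrete shapes and dtypes are hypothetical, not
taken from the patch.

    #include <ATen/ATen.h>

    void migration_example() {
      // Old form (what this patch removes): ask a Type object to build the
      // tensor, e.g. at::CPU(at::kLong).tensor({1, 0}).

      // New form: one factory entry point plus explicit options, mirroring
      // the SparseTensorImpl.cpp hunk.
      at::Tensor indices = at::empty(
          {1, 0},
          at::TensorOptions(false).device(at::kCPU).dtype(at::ScalarType::Long));

      // 0-dim (scalar) tensors go through the same entry point, mirroring
      // the TypeDefault::scalarTensor hunk.
      at::Tensor s =
          at::empty({}, at::TensorOptions(false).dtype(at::kFloat)).fill_(1);
    }

The design point is the one stated in the summary: once device, dtype, and
layout travel together in a TensorOptions value instead of being baked into a
Type instance, callers no longer need Type at all, which is what allows it to
leave the public API.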