pytorch/test/cpp/api/tensor_cuda.cpp
Tongzhou Wang d8dab6ffa8 Add tensor.to(options) (#13146)
Summary:
ezyang on the template hack
smessmer on SFINAE of the `TensorOptions(Device)`
goldsborough on the C++ API test changes
zdevito on the `jit` codegen changes
Pull Request resolved: https://github.com/pytorch/pytorch/pull/13146

Reviewed By: ezyang

Differential Revision: D12823809

Pulled By: SsnL

fbshipit-source-id: 98d65c401c98fda1c6fa358e4538f86c6495abdc
2018-10-29 16:26:06 -07:00
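
For quick reference, the overload added by this commit lets device and dtype be changed through a `TensorOptions` argument. A minimal illustrative sketch, mirroring calls from the tests below (assumes a CUDA-enabled build with at least two devices; the variable name `t` is invented for the example):

  at::Tensor t = at::empty({3, 4});  // CPU, float, strided by default
  t = t.to(at::TensorOptions({at::kCUDA, 1}).dtype(at::kDouble));  // move and cast in one call
  t = t.to(at::kCPU, at::kInt);  // separate device-and-dtype overload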


#include <gtest/gtest.h>

#include <ATen/ATen.h>

#include <cmath>

// Verifies that `tensor` (expected in the enclosing scope) has the given
// device type, device index, dtype, and layout.
#define REQUIRE_TENSOR_OPTIONS(device_, index_, type_, layout_)            \
  ASSERT_TRUE(                                                             \
      tensor.device().type() == at::Device((device_), (index_)).type());   \
  ASSERT_TRUE(                                                             \
      tensor.device().index() == at::Device((device_), (index_)).index()); \
  ASSERT_EQ(tensor.dtype(), (type_));                                      \
  ASSERT_TRUE(tensor.layout() == (layout_))

TEST(TensorTest, AllocatesTensorOnTheCorrectDevice_MultiCUDA) {
  auto tensor = at::tensor({1, 2, 3}, at::device({at::kCUDA, 1}));
  ASSERT_EQ(tensor.device().type(), at::Device::Type::CUDA);
  ASSERT_EQ(tensor.device().index(), 1);
}

TEST(TensorTest, ToDevice_CUDA) {
  auto tensor = at::empty({3, 4});
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 1});
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 0});
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 1});
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kFloat, at::kStrided);

  tensor = tensor.to(at::Device(at::kCPU));
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  tensor = tensor.to(at::kCUDA);
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kFloat, at::kStrided);

  tensor = tensor.to(at::TensorOptions({at::kCUDA, 1}));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kFloat, at::kStrided);

  tensor = tensor.to(at::TensorOptions({at::kCUDA, 0}));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kFloat, at::kStrided);

  tensor = tensor.to(at::TensorOptions(at::kDouble));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kDouble, at::kStrided);

  tensor = tensor.to(at::TensorOptions({at::kCUDA, 1}));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kDouble, at::kStrided);

  tensor = tensor.to(at::TensorOptions(at::kInt));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kInt, at::kStrided);

  tensor = tensor.to(at::TensorOptions(at::Device(at::kCPU)));
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kInt, at::kStrided);

  tensor = tensor.to(at::TensorOptions(at::kCUDA));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kInt, at::kStrided);
}

TEST(TensorTest, ToTensorAndTensorAttributes_CUDA) {
  auto tensor = at::empty({3, 4});
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  auto other = at::empty({3, 4}, at::kFloat);
  tensor = tensor.to(other);
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  other = at::empty({3, 4}, at::TensorOptions(at::kCUDA).dtype(at::kDouble));
  tensor = tensor.to(other.dtype());
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kDouble, at::kStrided);
  tensor = tensor.to(other.device());
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kDouble, at::kStrided);

  other = at::empty({3, 4}, at::TensorOptions({at::kCUDA, 1}).dtype(at::kLong));
  tensor = tensor.to(other.device(), other.dtype());
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kLong, at::kStrided);

  other = at::empty({3, 4}, at::kFloat);
  tensor = tensor.to(other.options());
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);
}

// `to` should not copy when the target options already match the tensor's;
// comparing data pointers checks that the same storage is returned.
TEST(TensorTest, ToDoesNotCopyWhenOptionsAreAllTheSame_CUDA) {
  auto tensor = at::empty(
      {3, 4}, at::TensorOptions(at::kFloat).device(at::Device("cuda")));

  auto hopefully_not_copy = tensor.to(tensor.options());
  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());

  hopefully_not_copy = tensor.to(at::kFloat);
  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());

  hopefully_not_copy = tensor.to("cuda");
  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());

  hopefully_not_copy = tensor.to(at::TensorOptions("cuda"));
  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());

  hopefully_not_copy = tensor.to(at::TensorOptions(at::kFloat));
  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
}

TEST(TensorTest, ToDeviceAndDtype_CUDA) {
  auto tensor = at::empty({3, 4});
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 1}, at::kInt);
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kInt, at::kStrided);

  tensor = tensor.to(at::TensorOptions({at::kCUDA, 0}).dtype(at::kLong));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kLong, at::kStrided);

  tensor = tensor.to(at::TensorOptions({at::kCUDA, 1}).dtype(at::kDouble));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kDouble, at::kStrided);

  tensor = tensor.to(at::kCPU, at::kInt);
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kInt, at::kStrided);
}