Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14414

The previous functions were CUDA-centric and led to lots of places where we improperly assumed that CUDA is the only game in town (it's not). Best to delete them.

What are your alternatives? This diff fixes some use sites, which may give you some ideas. In particular, "given a device type, give me the current device for that device type" might be a good function to enshrine for real.

Reviewed By: gchanan
Differential Revision: D13218540
fbshipit-source-id: 2f42cd6b9bdab4930d25166b8041c9466a1c6e0a
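The summary floats the idea of a "current device for a given device type" helper without committing to one. Below is a minimal sketch of what such a helper could look like; it is an illustration only, not part of this diff. The name currentDeviceFor is made up, and it assumes the c10::impl::getDeviceGuardImpl() backend hook is available to query the current device for a device type.

// Sketch of a hypothetical "current device for this device type" helper.
// Assumption: c10::impl::getDeviceGuardImpl() exposes the backend's notion
// of the current device; the name currentDeviceFor is illustrative only.
#include <c10/core/Device.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>

c10::Device currentDeviceFor(c10::DeviceType type) {
  // CPU has no meaningful "current device index"; return the default device.
  if (type == c10::kCPU) {
    return c10::Device(c10::kCPU);
  }
  // Ask the device type's guard implementation which device is current.
  return c10::impl::getDeviceGuardImpl(type)->getDevice();
}

A use site that previously reached for a CUDA-specific current_device() call could then ask currentDeviceFor(c10::kCUDA).index() instead, keeping the device type explicit rather than baked into the API.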
83 lines
2.8 KiB
C++
#include <gtest/gtest.h>

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/Functions.h>
#include <c10/core/ScalarType.h>
#include <ATen/core/TensorOptions.h>

#include <torch/cuda.h>

// NB: This file is compiled even in CPU build (for some reason), so
// make sure you don't include any CUDA only headers.

using namespace at;

// TODO: These might be generally helpful aliases elsewhere.
at::Device CPUDevice() {
  return at::Device(at::kCPU);
}
at::Device CUDADevice(DeviceIndex index) {
  return at::Device(at::kCUDA, index);
}

// A macro so we don't lose location information when an assertion fails.
#define REQUIRE_OPTIONS(device_, index_, type_, layout_)                  \
  ASSERT_EQ(options.device().type(), Device((device_), (index_)).type()); \
  ASSERT_TRUE(                                                            \
      options.device().index() == Device((device_), (index_)).index());   \
  ASSERT_EQ(typeMetaToScalarType(options.dtype()), (type_));              \
  ASSERT_TRUE(options.layout() == (layout_))

#define REQUIRE_TENSOR_OPTIONS(device_, index_, type_, layout_)            \
  ASSERT_EQ(tensor.device().type(), Device((device_), (index_)).type());   \
  ASSERT_EQ(tensor.device().index(), Device((device_), (index_)).index()); \
  ASSERT_EQ(tensor.type().scalarType(), (type_));                          \
  ASSERT_TRUE(tensor.type().layout() == (layout_))

TEST(TensorOptionsTest, ConstructsWellFromCUDATypes_CUDA) {
  auto options = CUDA(kFloat).options();
  REQUIRE_OPTIONS(kCUDA, -1, kFloat, kStrided);

  options = CUDA(kInt).options();
  REQUIRE_OPTIONS(kCUDA, -1, kInt, kStrided);

  options = getNonVariableType(Backend::SparseCUDA, kFloat).options();
  REQUIRE_OPTIONS(kCUDA, -1, kFloat, kSparse);

  options = getNonVariableType(Backend::SparseCUDA, kByte).options();
  REQUIRE_OPTIONS(kCUDA, -1, kByte, kSparse);

  options = CUDA(kFloat).options(/*device=*/5);
  REQUIRE_OPTIONS(kCUDA, 5, kFloat, kStrided);

  options =
      getNonVariableType(Backend::SparseCUDA, kFloat).options(/*device=*/5);
  REQUIRE_OPTIONS(kCUDA, 5, kFloat, kSparse);
}

TEST(TensorOptionsTest, ConstructsWellFromCUDATensors_MultiCUDA) {
  auto options = empty(5, device(kCUDA).dtype(kDouble)).options();
  REQUIRE_OPTIONS(kCUDA, 0, kDouble, kStrided);

  options = empty(5, getNonVariableType(Backend::SparseCUDA, kByte)).options();
  REQUIRE_OPTIONS(kCUDA, 0, kByte, kSparse);

  if (torch::cuda::device_count() > 1) {
    Tensor tensor;
    {
      DeviceGuard guard(CUDADevice(1));
      tensor = empty(5, device(kCUDA));
    }
    options = tensor.options();
    REQUIRE_OPTIONS(kCUDA, 1, kFloat, kStrided);

    {
      DeviceGuard guard(CUDADevice(1));
      tensor = empty(5, device(kCUDA).layout(kSparse));
    }
    options = tensor.options();
    REQUIRE_OPTIONS(kCUDA, 1, kFloat, kSparse);
  }
}