Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/10478

- Removed the Backend constructor from Device, and fixed all use sites to use DeviceType::CPU instead of kCPU, or to use the new function backendToDeviceType to perform the conversion.
- New method device_type() on Type; it gives you the underlying device type, e.g. CPU for SparseCPU.
- We add backward compatibility for kCPU/kCUDA uses by introducing a new special type which is implicitly convertible to both DeviceType and Backend. As long as you don't define a function that's overloaded on both DeviceType and Backend (but not on BackendOrDeviceType), the implicit conversions will ensure that uses of at::Device(at::kCPU) keep working. We fixed use sites in the library, but did NOT fix sites in the test code, so that we can exercise this BC code.

Reviewed By: Yangqing

Differential Revision: D9301861

fbshipit-source-id: 9a9d88620500715c7b37e655b4fd761f6dd72716
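To make the backward-compatibility mechanism concrete, here is a minimal stand-alone sketch, not the actual PyTorch implementation: only the names BackendOrDeviceType and backendToDeviceType appear in the summary above; the enum values, member layout, and the toy Device type are illustrative assumptions.

// Minimal sketch of the BC shim described in the summary. Only the names
// BackendOrDeviceType and backendToDeviceType come from the summary; the
// enums and members below are illustrative, not the real at:: definitions.
#include <stdexcept>

enum class Backend { CPU, CUDA, SparseCPU, SparseCUDA };
enum class DeviceType { CPU, CUDA };

// Map a Backend to its underlying DeviceType (e.g. SparseCPU -> CPU).
inline DeviceType backendToDeviceType(Backend b) {
  switch (b) {
    case Backend::CPU:
    case Backend::SparseCPU:
      return DeviceType::CPU;
    case Backend::CUDA:
    case Backend::SparseCUDA:
      return DeviceType::CUDA;
  }
  throw std::runtime_error("unknown backend");
}

// Implicitly convertible to both Backend and DeviceType, so legacy
// constants like kCPU keep working wherever either type is expected.
struct BackendOrDeviceType {
  Backend backend;
  /*implicit*/ operator Backend() const { return backend; }
  /*implicit*/ operator DeviceType() const { return backendToDeviceType(backend); }
};

const BackendOrDeviceType kCPU{Backend::CPU};
const BackendOrDeviceType kCUDA{Backend::CUDA};

// Device now takes a DeviceType, yet Device(kCPU) still compiles via the
// implicit conversion above.
struct Device {
  explicit Device(DeviceType type) : type(type) {}
  DeviceType type;
};

int main() {
  Device d(kCPU);  // OK: kCPU converts to DeviceType::CPU
  (void)d;
}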
C++ · 30 lines · 873 B
#include <torch/csrc/variable_tensor_functions.h>
#include <torch/csrc/autograd/generated/VariableType.h>
#include <torch/csrc/autograd/variable.h>

namespace torch {

// Returns the VariableType wrapper for the given backend/scalar-type pair,
// so tensors created through it participate in autograd.
at::Type& getType(at::Backend backend, at::ScalarType type) {
  return *autograd::VariableType::getType(at::getType(backend, type));
}

// Convenience overloads for the CPU and CUDA backends.
at::Type& CPU(at::ScalarType type) {
  return torch::getType(at::Backend::CPU, type);
}

at::Type& CUDA(at::ScalarType type) {
  return torch::getType(at::Backend::CUDA, type);
}

// Wrap a scalar value in a tensor that is an autograd Variable.
at::Tensor toTensor(const at::Scalar& scalar) {
  return autograd::make_variable(scalar.toTensor());
}

// Toggle and query gradient tracking on a tensor that is a Variable.
void set_requires_grad(at::Tensor& tensor, bool requires_grad) noexcept {
  autograd::as_variable_ref(tensor).set_requires_grad(requires_grad);
}

bool requires_grad(const at::Tensor& tensor) noexcept {
  return autograd::as_variable_ref(tensor).requires_grad();
}

} // namespace torch
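A brief usage sketch (illustrative, not part of the file): it only exercises the helpers defined above, wrapping a scalar as an autograd-aware Variable and toggling gradient tracking.

// Illustrative usage of the helpers above; assumes the torch/ATen headers
// of the same era are available on the include path.
#include <torch/csrc/variable_tensor_functions.h>

void example() {
  // Wrap a scalar into a tensor that is an autograd Variable.
  at::Tensor t = torch::toTensor(at::Scalar(3.14));
  torch::set_requires_grad(t, true);       // start tracking gradients
  bool tracked = torch::requires_grad(t);  // tracked == true
  (void)tracked;
}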