Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-07 12:21:27 +01:00)
* This removes the flag controlling whether the interpreter works on variables.
* Now the interpreter _always_ works on variables.
* Constants in the IR are still _always_ non-variables, and an assert was added to ensure this.
* as_tensor was split into as_variable and as_tensor, since it is sometimes used to construct constants in the IR.
* I tried changing the IR to also always use variables, but that change was much more cross-cutting and fragile, and I never got it working.
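The bullets above hinge on the split between plain ATen tensors (still used for IR constants) and autograd Variables (now used for everything the interpreter touches). Below is a minimal sketch of that split, assuming only autograd::make_variable and at::Scalar::toTensor, both of which also appear in the file that follows; the two helper names are illustrative and are not part of the commit:

#include <torch/csrc/autograd/variable.h>

// Illustrative helpers, not interpreter code: an IR constant stays a plain,
// non-variable tensor, while any value the interpreter manipulates is a
// Variable produced by make_variable.
at::Tensor constant_like(const at::Scalar& s) {
  return s.toTensor();                                   // plain tensor, no autograd metadata
}

at::Tensor interpreter_value_like(const at::Scalar& s) {
  return torch::autograd::make_variable(s.toTensor());   // wrapped as a Variable
}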
#include <torch/csrc/variable_tensor_functions.h>
#include <torch/csrc/autograd/generated/VariableType.h>
#include <torch/csrc/autograd/variable.h>

namespace torch {

// Returns the Variable-aware at::Type for the given backend and scalar type,
// so tensors created through it carry autograd metadata.
at::Type& getType(at::Backend backend, at::ScalarType type) {
  return *autograd::VariableType::getType(at::getType(backend, type));
}

// Convenience wrapper for the CPU backend.
at::Type& CPU(at::ScalarType type) {
  return torch::getType(at::kCPU, type);
}

// Convenience wrapper for the CUDA backend.
at::Type& CUDA(at::ScalarType type) {
  return torch::getType(at::kCUDA, type);
}

// Converts a scalar to a tensor and wraps it as a Variable.
at::Tensor toTensor(const at::Scalar& scalar) {
  return autograd::make_variable(scalar.toTensor());
}

// Sets the requires_grad flag on the Variable backing this tensor.
void set_requires_grad(at::Tensor& tensor, bool requires_grad) noexcept {
  autograd::as_variable_ref(tensor).set_requires_grad(requires_grad);
}

// Reads the requires_grad flag from the Variable backing this tensor.
bool requires_grad(const at::Tensor& tensor) noexcept {
  return autograd::as_variable_ref(tensor).requires_grad();
}

} // namespace torch
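A hedged usage sketch of the helpers defined above; it assumes only the functions declared in torch/csrc/variable_tensor_functions.h and is not taken from the PyTorch sources:

#include <torch/csrc/variable_tensor_functions.h>
#include <iostream>

int main() {
  // Wrap a scalar as a tensor; because toTensor goes through make_variable,
  // the result is a Variable and can participate in autograd.
  at::Tensor t = torch::toTensor(at::Scalar(3.0));

  torch::set_requires_grad(t, true);
  std::cout << std::boolalpha << torch::requires_grad(t) << "\n";  // prints "true"

  // getType/CPU/CUDA hand back the Variable-aware at::Type for a backend and
  // scalar type, so tensors created through it are Variables as well.
  at::Type& cpu_float = torch::CPU(at::kFloat);
  (void)cpu_float;
  return 0;
}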