pytorch/torch/csrc/autograd/input_buffer.cpp
Richard Zou e60a7c2c88 codemod tensor.type().is_cuda(), tensor.type().is_sparse() (#13590)
Summary:
Follow-up to #12841

Changed these to not require type dispatch:
tensor.type().is_cuda() -> tensor.is_cuda()
tensor.type().is_sparse() -> tensor.is_sparse()
isVariable(tensor.type()) -> tensor.is_variable()

This probably does not affect performance
very much in most cases but it is nice to have.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/13590

Reviewed By: ezyang

Differential Revision: D12929301

Pulled By: zou3519

fbshipit-source-id: 8ac5c6200c579dd7a44fb4ee58fc9bb170feb1d7
2018-11-07 07:27:42 -08:00

48 lines
1.0 KiB
C++

#include "torch/csrc/autograd/input_buffer.h"
#include "torch/csrc/autograd/functions/basic_ops.h"
#include <ATen/DeviceGuard.h>
#include <cstddef>
#include <utility>
#include <vector>
namespace torch { namespace autograd {

// Accumulates the gradient `var` into slot `pos` of the buffer.
// A first arrival is simply stored; subsequent arrivals are summed
// on the device of the incoming gradient.
void InputBuffer::add(size_t pos, Variable var) {
  AT_ASSERT(pos < buffer.size());
  // Undefined gradients contribute nothing.
  if (!var.defined()) {
    return;
  }
  auto& old_var = buffer[pos];
  if (!old_var.defined()) {
    // First gradient for this slot: take ownership directly.
    buffer[pos] = std::move(var);
    return;
  }
  // Perform the accumulation on the incoming gradient's device.
  at::DeviceGuard device_guard(var);
  // ATen doesn't route sparse additions correctly...
  // so keep the sparse operand on the left-hand side of the sum.
  buffer[pos] = old_var.is_sparse() ? var + old_var : old_var + var;
}

// Device index of the first defined CUDA variable in the buffer,
// or -1 when every entry lives on the CPU (or is undefined).
auto InputBuffer::device() const -> int {
  for (const auto& var : buffer) {
    if (var.defined() && var.is_cuda()) {
      return var.get_device();
    }
  }
  return -1;
}

// Consumes the buffer and hands its variables to the caller.
auto InputBuffer::variables(InputBuffer&& g) -> std::vector<Variable> {
  return std::move(g.buffer);
}

}} // namespace torch::autograd