Remove .data() use in C++ frontend (#13675)

Summary:
Removes the last uses of `.data()` in implementation code of the C++ frontend.
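
For illustration, a minimal sketch of the pattern being removed (the `flatten_grads` helper below is hypothetical, mirroring `LBFGS::gather_flat_grad`): gradients are used as ordinary tensors directly, rather than being unwrapped through `autograd::Variable(...).data()` first.

```cpp
#include <torch/torch.h>

#include <vector>

// Hypothetical helper mirroring LBFGS::gather_flat_grad: flatten every
// parameter's gradient into a single 1-D tensor.
torch::Tensor flatten_grads(const std::vector<torch::Tensor>& parameters) {
  std::vector<torch::Tensor> views;
  for (const auto& parameter : parameters) {
    // Old pattern: autograd::Variable(parameter.grad()).data().view(-1)
    // New pattern: use the gradient tensor directly; no `.data()` unwrapping.
    views.push_back(parameter.grad().view(-1));
  }
  return torch::cat(views);
}
```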

CC yf225

ezyang ebetica apaszke
Pull Request resolved: https://github.com/pytorch/pytorch/pull/13675

Differential Revision: D12966061

Pulled By: goldsborough

fbshipit-source-id: fbc0c83c3ba56598ff853bc7b1ddf9005fdd9c41
Peter Goldsborough 2018-11-07 17:28:13 -08:00 committed by Facebook Github Bot
parent eb88098e11
commit 87b47ff850
2 changed files with 9 additions and 10 deletions

View File

@@ -97,7 +97,7 @@ class RNNImplBase : public torch::nn::Cloneable<Derived> {
std::vector<Tensor> b_hh;
protected:
- /// The function signature of `at::rnn_relu`, `at::rnn_tanh` and `at::gru`.
+ /// The function signature of `rnn_relu`, `rnn_tanh` and `gru`.
using RNNFunctionSignature = std::tuple<Tensor, Tensor>(
/*input=*/const Tensor&,
/*state=*/const Tensor&,

View File

@@ -19,18 +19,17 @@ LBFGSOptions::LBFGSOptions(double learning_rate)
Tensor LBFGS::gather_flat_grad() {
std::vector<Tensor> views;
for (auto& parameter : parameters_) {
- views.push_back(autograd::Variable(parameter.grad()).data().view(-1));
+ views.push_back(parameter.grad().view(-1));
}
- return at::cat(views);
+ return torch::cat(views);
}
void LBFGS::add_grad(const torch::Tensor& step_size, const Tensor& update) {
int64_t offset = 0;
for (auto& parameter : parameters_) {
int64_t numel = parameter.numel();
- Tensor& pd = autograd::Variable(parameter).data();
- pd.add_(
- update.slice(0, offset, offset + numel, 1).view_as(pd),
+ parameter.add_(
+ update.slice(0, offset, offset + numel, 1).view_as(parameter),
step_size.item<float>());
offset += numel;
}
@@ -49,7 +48,7 @@ torch::Tensor LBFGS::step(LossClosure closure) {
return loss;
}
- Tensor ONE = flat_grad.type().scalarTensor(1);
+ Tensor ONE = torch::tensor(1, flat_grad.options());
int64_t n_iter = 0;
while (n_iter < options.max_iter_) {
@@ -91,7 +90,7 @@ torch::Tensor LBFGS::step(LossClosure closure) {
Tensor q = flat_grad.neg();
for (int64_t i = num_old - 1; i >= 0; i--) {
al.at(i) = old_stps.at(i).dot(q) * ro.at(i);
- q.add_(old_dirs.at(i), -at::_local_scalar(al.at(i)));
+ q.add_(old_dirs.at(i), -al.at(i).item<float>());
}
// Multiply by initial Hessian
@@ -101,7 +100,7 @@ torch::Tensor LBFGS::step(LossClosure closure) {
for (int64_t i = 0; i < num_old; i++) {
Tensor be_i = old_dirs.at(i).dot(r) * ro.at(i);
- r.add_(old_stps.at(i), at::_local_scalar(al.at(i) - be_i));
+ r.add_(old_stps.at(i), (al.at(i) - be_i).item<float>());
}
prev_flat_grad.copy_(flat_grad);
}
@@ -114,7 +113,7 @@ torch::Tensor LBFGS::step(LossClosure closure) {
if (n_iter == 1) {
t = torch::min(ONE, ONE / abs_grad_sum) * options.learning_rate_;
} else {
- t = at::tensor(options.learning_rate_, torch::kFloat32);
+ t = torch::tensor(options.learning_rate_, torch::kFloat32);
}
Tensor gtd = flat_grad.dot(d);