Don't make factory methods create a tensor and then immediately copy it (#17565)

Summary:
Create a `make_variable` overload that moves out of a tensor instead of going through `shallow_copy_and_detach`. Call this overload from factory methods like `empty` that create a brand-new tensor, do nothing with it, and then copy it into a variable.

Exact benchmark numbers are still to come, but preliminary measurements suggest this removes roughly 20-40% of the overhead of calling `torch.empty(0)`.
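
For illustration, a minimal sketch of the two construction paths (the `example` wrapper and the `at::empty({0})` call are hypothetical scaffolding; the `make_variable` / `make_variable_consuming` names and semantics come from the diff below):

// A sketch, not part of this commit. Assumes the pre-1.0-era autograd
// headers; includes and namespaces are approximate.
#include <ATen/ATen.h>
#include <torch/csrc/autograd/variable.h>

void example() {
  // Old path: the factory's freshly created tensor is handed to
  // make_variable, which duplicates its TensorImpl via
  // shallow_copy_and_detach before the temporary dies.
  at::Tensor tensor = at::empty({0});
  auto copied = torch::autograd::make_variable(tensor, /*requires_grad=*/false);

  // New path: the temporary is moved in and its TensorImpl is wrapped
  // directly into the Variable, skipping the copy.
  at::Tensor fresh = at::empty({0});
  auto consumed = torch::autograd::make_variable_consuming(
      std::move(fresh), /*requires_grad=*/false);
}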
Pull Request resolved: https://github.com/pytorch/pytorch/pull/17565

Differential Revision: D14266130

Pulled By: umanwizard

fbshipit-source-id: f57d5f2ca3f80ee8ee96d50f905e852fd10db941
Author: Brennan Vincent
Date: 2019-03-03 22:13:27 -08:00
Committed by: Facebook Github Bot
Parent: 7a51c03a30
Commit: 6a297b8675
2 changed files with 29 additions and 2 deletions


@@ -13,7 +13,7 @@ inline at::Tensor ${name}(${formals}) {
   ${pre_record_trace}
   at::Tensor tensor = at::${name}(${actuals});
   at::Tensor result =
-      autograd::make_variable(tensor, /*requires_grad=*/${requires_grad});
+      autograd::make_variable_consuming(std::move(tensor), /*requires_grad=*/${requires_grad});
   ${post_record_trace}
   return result;
 }


@@ -104,7 +104,8 @@ struct TORCH_API Variable : public at::Tensor {
       bool allow_tensor_metadata_change,
       Edge gradient_edge);
 
-  /// Creates a `Variable` from the given `Tensor`. `requires_grad` should be
+  /// Creates a `Variable` from the given `Tensor`, copying its underlying `TensorImpl`.
+  /// `requires_grad` should be
   /// set only for leaves, and determines whether the `Variable` will accumulate
   /// gradients. NOTE: `data` must *not* be a `Variable` already. Its dynamic
   /// type *must* be `Tensor`.
@@ -113,6 +114,16 @@ struct TORCH_API Variable : public at::Tensor {
       bool requires_grad,
       bool allow_tensor_metadata_change);
 
+  /// Creates a `Variable` from the given `Tensor`, consuming its underlying `TensorImpl`.
+  /// This is intended to be used from functions that immediately create a `Tensor`,
+  /// convert it to a `Variable`, and then free it; it has been found to
+  /// decrease the overhead of those operations, in some situations.
+  /// The comments about `requires_grad` and `data` on the above version also apply to this one.
+  friend Variable make_variable_consuming(
+      at::Tensor data,
+      bool requires_grad,
+      bool allow_tensor_metadata_change);
+
   /// Creates a `Variable` from the given `Tensor` and specify a
   /// `gradient_edge`, i.e. a (function, input_nr) pair specifying the function
   /// in the autograd graph, and what particular input of that function, this
@@ -576,6 +587,22 @@ inline Variable make_variable(
   return Variable();
 }
 
+inline Variable make_variable_consuming(
+    at::Tensor data,
+    bool requires_grad = false,
+    bool allow_tensor_metadata_change = true) {
+  AT_CHECK(
+      !data.is_variable(),
+      "Must not create a new variable from a variable, use its .data()");
+  if (data.defined()) {
+    AT_ASSERT(data.getIntrusivePtr().use_count() == 1);
+    data.unsafeGetTensorImpl()->set_allow_tensor_metadata_change(allow_tensor_metadata_change);
+    auto autograd_meta = c10::guts::make_unique<Variable::AutogradMeta>();
+    return Variable(c10::make_intrusive<Variable::Impl>(std::move(data), std::move(autograd_meta), requires_grad));
+  }
+  return Variable();
+}
+
 inline Variable make_variable(
     at::Tensor data,
     Edge gradient_edge,
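
One caller-side consequence of the `AT_ASSERT(data.getIntrusivePtr().use_count() == 1)` above, sketched with hypothetical caller code (the `precondition_sketch` wrapper and its variables are not part of this diff):

#include <ATen/ATen.h>
#include <torch/csrc/autograd/variable.h>

void precondition_sketch() {
  // Fine: a freshly created tensor uniquely owns its TensorImpl.
  at::Tensor fresh = at::empty({0});
  auto v = torch::autograd::make_variable_consuming(std::move(fresh));

  // Not fine: `alias` shares the TensorImpl, so use_count() is 2 and the
  // AT_ASSERT inside make_variable_consuming would fail.
  at::Tensor shared = at::empty({0});
  at::Tensor alias = shared;
  // auto bad = torch::autograd::make_variable_consuming(std::move(shared));
  auto ok = torch::autograd::make_variable(shared);  // copying overload instead
}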