[structural binding][7/N] Replace std::tie with structural binding (#130216)
Follows #120353

Pull Request resolved: https://github.com/pytorch/pytorch/pull/130216
Approved by: https://github.com/albanD
This commit is contained in:
parent fb696bf264
commit 7a3ab1fe79
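The pattern repeated throughout this diff is mechanical: variables that were declared up front and then assigned through std::tie are collapsed into a single C++17 structured binding, so the names are introduced and initialized directly from the tuple-returning call. A minimal standalone sketch of the before/after shape (make_stats is a hypothetical helper, not a PyTorch API):

#include <iostream>
#include <string>
#include <tuple>

// Hypothetical helper returning several values at once, standing in for
// calls like batch_norm_cpu() or input_flattened.sort() in the hunks below.
std::tuple<int, double, std::string> make_stats() {
  return {42, 0.5, "ok"};
}

int main() {
  // Old style: declare (and default-construct) the targets first,
  // then assign through std::tie.
  int count;
  double mean;
  std::string status;
  std::tie(count, mean, status) = make_stats();

  // New style: one structured binding declares and initializes all three
  // names in a single step, with types deduced from the returned tuple.
  auto [count2, mean2, status2] = make_stats();

  std::cout << count2 << " " << mean2 << " " << status2 << "\n";
  return 0;
}

Besides being shorter, the new form drops the separate default-constructed temporaries (empty Tensors, in most of these hunks) that std::tie needed as assignment targets.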
@@ -853,8 +853,7 @@ std::tuple<Tensor, Tensor, Tensor> batch_norm_cpu(const Tensor& self, const std:
 std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_with_update_cpu(
     const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
     Tensor& running_mean, Tensor& running_var, double momentum, double eps) {
-  Tensor output, save_mean, save_var;
-  std::tie(output, save_mean, save_var) =
+  auto [output, save_mean, save_var] =
       batch_norm_cpu(input, weight_opt, bias_opt, running_mean, running_var, /*update*/true, momentum, eps);
   Tensor reserve = at::empty({0}, input.options().dtype(kByte));
   return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, save_mean, save_var, reserve);
@@ -876,8 +875,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_no_update(
     double momentum, double eps) {
   const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();});
   const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();});
-  Tensor output, save_mean, save_var;
-  std::tie(output, save_mean, save_var) =
+  auto [output, save_mean, save_var] =
       batch_norm_cpu(input, weight_opt, bias_opt, const_cast<Tensor&>(running_mean), const_cast<Tensor&>(running_var), /*update*/false, momentum, eps);
   Tensor reserve = at::empty({0}, input.options().dtype(kByte));
   return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, save_mean, save_var, reserve);
@@ -1403,8 +1403,7 @@ Tensor zeros_like(
   auto res = at::native::sparse_compressed_tensor_with_dims(
     nnz, dense_dim, self.sizes(), blocksize, index_dtype,
     typeMetaToScalarType(options.dtype()), options.layout(), options.device(), options.pinned_memory());
-  Tensor compressed_indices, plain_indices;
-  std::tie(compressed_indices, plain_indices) = at::sparse_csr::getCompressedPlainIndices(res);
+  auto [compressed_indices, plain_indices] = at::sparse_csr::getCompressedPlainIndices(res);
   compressed_indices.zero_();
   return res;
 }
@@ -182,8 +182,7 @@ std::tuple<Tensor, Tensor, Tensor> unique_cpu_sorted_template(
   // is to return a output size of ([1]), `flatten` here will do the job
   auto input_flattened = input.flatten();

-  Tensor input_sorted, indices;
-  std::tie(input_sorted, indices) = input_flattened.sort();
+  auto [input_sorted, indices] = input_flattened.sort();

   scalar_t* input_sorted_data = input_sorted.data_ptr<scalar_t>();
   int64_t* indices_data = indices.data_ptr<int64_t>();
@@ -218,8 +218,7 @@ cpu_adaptive_avg_pool2d_channels_last(
         int64_t d2 = 0;
         for (; d2 < size - (size % bVec::size()); d2 += bVec::size()) {
           bVec data_bvec = bVec::loadu(in + d2);
-          fVec data_fvec0, data_fvec1;
-          std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
+          auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);

           fVec sum_fvec0 = fVec::loadu(sum + d2) + data_fvec0;
           fVec sum_fvec1 = fVec::loadu(sum + d2 + fVec::size()) + data_fvec1;
@@ -643,8 +642,7 @@ cpu_adaptive_avg_pool3d_channels_last(
         int64_t d2 = 0;
         for (; d2 < size - (size % bVec::size()); d2 += bVec::size()) {
           bVec data_bvec = bVec::loadu(in + d2);
-          fVec data_fvec0, data_fvec1;
-          std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
+          auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);

           fVec sum_fvec0 = fVec::loadu(sum + d2) + data_fvec0;
           fVec sum_fvec1 = fVec::loadu(sum + d2 + fVec::size()) + data_fvec1;
@@ -276,8 +276,7 @@ cpu_adaptive_max_pool2d_channels_last(
           for (; d2 < len; d2 += bVec::size()) {
             iVec index_ivec = iVec(ih * input_width + iw);
             bVec val_bvec = bVec::loadu(in + d2);
-            fVec val_fvec0, val_fvec1;
-            std::tie(val_fvec0, val_fvec1) = convert_to_float<scalar_t>(val_bvec);
+            auto [val_fvec0, val_fvec1] = convert_to_float<scalar_t>(val_bvec);

             iVec maxindex_ivec0 = iVec::loadu(index_buffer.get() + d2);
             iVec maxindex_ivec1 = iVec::loadu(index_buffer.get() + d2 + iVec::size());
@@ -766,8 +765,7 @@ cpu_adaptive_max_pool3d_channels_last(
           for (; d2 < len; d2 += bVec::size()) {
             iVec index_ivec = iVec(id * input_height * input_width + ih * input_width + iw);
             bVec val_bvec = bVec::loadu(in + d2);
-            fVec val_fvec0, val_fvec1;
-            std::tie(val_fvec0, val_fvec1) = convert_bfloat16_float(val_bvec);
+            auto [val_fvec0, val_fvec1] = convert_bfloat16_float(val_bvec);

             iVec maxindex_ivec0 = iVec::loadu(index_buffer.get() + d2);
             iVec maxindex_ivec1 = iVec::loadu(index_buffer.get() + d2 + iVec::size());
@@ -309,8 +309,7 @@ void cpu_avg_pool2d_channels_last(
         int64_t d2 = 0;
         for (; d2 < size - (size % bVec::size()); d2 += bVec::size()) {
           bVec data_bvec = bVec::loadu(in + d2);
-          fVec data_fvec0, data_fvec1;
-          std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
+          auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);

           fVec sum_fvec0 = fVec::loadu(sum + d2) + data_fvec0;
           fVec sum_fvec1 = fVec::loadu(sum + d2 + fVec::size()) + data_fvec1;
@@ -869,8 +868,7 @@ void cpu_avg_pool3d_channels_last(
         int64_t d2 = 0;
         for (; d2 < size - (size % bVec::size()); d2 += bVec::size()) {
           bVec data_bvec = bVec::loadu(in + d2);
-          fVec data_fvec0, data_fvec1;
-          std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec);
+          auto [data_fvec0, data_fvec1] = convert_bfloat16_float(data_bvec);

           fVec sum_fvec0 = fVec::loadu(sum + d2) + data_fvec0;
           fVec sum_fvec1 = fVec::loadu(sum + d2 + fVec::size()) + data_fvec1;
@@ -287,8 +287,7 @@ template<typename scalar_t>
 void infer_bin_edges_from_input(const Tensor& input, const int64_t N,
         std::vector<double> &leftmost_edges, std::vector<double> &rightmost_edges) {
     // Calls aminmax on input with dim=0, reducing all but the innermost dimension of input.
-    Tensor min, max;
-    std::tie(min, max) = aminmax(input, 0);
+    auto [min, max] = aminmax(input, 0);

     TORCH_INTERNAL_ASSERT(min.is_contiguous() && max.is_contiguous());

@@ -156,9 +156,7 @@ void calculate_mode(
   scalar_t* iter_begin = data;
   scalar_t* iter_end = data + n_element;

-  scalar_t mode;
-  int64_t index;
-  std::tie(mode, index) = ModeImpl<scalar_t>{}(iter_begin, iter_end);
+  auto [mode, index] = ModeImpl<scalar_t>{}(iter_begin, iter_end);

   // Place mode, index in output
   scalar_t* values_data = values.mutable_data_ptr<scalar_t>();
@@ -211,8 +211,7 @@ std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm(
 std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_with_update_mkldnn(
     const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
     Tensor& running_mean, Tensor& running_var, double momentum, double eps) {
-  Tensor output, save_mean, save_var;
-  std::tie(output, save_mean, save_var) =
+  auto [output, save_mean, save_var] =
       mkldnn_batch_norm(input, weight_opt, bias_opt, running_mean, running_var, /*train*/true, momentum, eps);
   Tensor reserve = empty_mkldnn({0}, input.scalar_type());
   return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, save_mean, save_var, reserve);
@@ -174,8 +174,7 @@ sycl::event convolution(
   bool is_channels_last = use_channels_last_for_conv(src, weight, false);

   // create usr_md for tensors, and md for conv primitive
-  dnnl::memory::desc src_md, weight_md, dst_md;
-  std::tie(src_md, weight_md, dst_md) = conv_get_md(src, weight, dst, groups, is_channels_last);
+  auto [src_md, weight_md, dst_md] = conv_get_md(src, weight, dst, groups, is_channels_last);

   auto bia_fmt = dnnl::memory::format_tag::x;
   auto bia_md = bia.defined()
@@ -269,8 +268,7 @@ sycl::event convolution_backward_weights(
   bool is_channels_last = use_channels_last_for_conv(src, diff_dst, /*is_transposed=*/false);

   // create dnnl::memory desc
-  dnnl::memory::desc src_md, weight_md, dst_md;
-  std::tie(src_md, weight_md, dst_md) =
+  auto [src_md, weight_md, dst_md] =
       conv_get_md(src, diff_weight, diff_dst, groups, is_channels_last);
   dnnl::memory::format_tag bia_fmt = dnnl::memory::format_tag::x;
   auto bia_md = diff_bia.defined()
@@ -371,8 +369,7 @@ sycl::event convolution_backward_data(
   bool is_channels_last = use_channels_last_for_conv(diff_dst, weight, /*is_transposed=*/false);

   // create memory desc
-  dnnl::memory::desc src_md, weight_md, dst_md;
-  std::tie(src_md, weight_md, dst_md) =
+  auto [src_md, weight_md, dst_md] =
       conv_get_md(diff_src, weight, diff_dst, groups, is_channels_last);
   dnnl::memory::format_tag bia_fmt = dnnl::memory::format_tag::x;
   auto bia_md = bias_defined
@@ -165,9 +165,7 @@ sycl::event deconvolution(
   bool is_channels_last_suggested = use_channels_last_for_conv(src, weight, /*is_transposed=*/true);

   // create usr_md for tensors, and md for conv primitive
-  dnnl::memory::desc src_md, weight_md, dst_md;
-
-  std::tie(src_md, weight_md, dst_md) =
+  auto [src_md, weight_md, dst_md] =
       deconv_get_plain_md(src, weight, dst, groups, is_channels_last_suggested);

   dnnl::memory::format_tag bia_fmt = dnnl::memory::format_tag::x;
@@ -255,8 +253,7 @@ sycl::event deconvolution_backward_data(
   bool is_channels_last_suggested =
       use_channels_last_for_conv(diff_dst, weight, /*is_transposed=*/true);
   // create memory desc
-  dnnl::memory::desc src_md, weight_md, dst_md;
-  std::tie(src_md, weight_md, dst_md) =
+  auto [src_md, weight_md, dst_md] =
       deconv_get_plain_md(
           diff_src, weight, diff_dst, groups, is_channels_last_suggested);

@@ -350,8 +347,7 @@ sycl::event deconvolution_backward_weights(
       use_channels_last_for_conv(src, diff_dst, /*is_transposed=*/true);

   // create memory desc
-  dnnl::memory::desc src_md, weight_md, dst_md;
-  std::tie(src_md, weight_md, dst_md) = deconv_get_plain_md(
+  auto [src_md, weight_md, dst_md] = deconv_get_plain_md(
       src, diff_weight, diff_dst, groups, is_channels_last_suggested);

   dnnl::memory::format_tag bia_fmt = dnnl::memory::format_tag::x;
@@ -167,13 +167,10 @@ Tensor NestedTensor_elementwise_Tensor(
         ".");
   }

-  NestedTensorImpl* self_impl = nullptr;
-  NestedTensorImpl* other_impl = nullptr;
-
   self_contiguous = supports_striding ? self.contiguous() : self;
   other_contiguous = supports_striding ? other.contiguous() : other;

-  std::tie(self_impl, other_impl) =
+  auto [self_impl, other_impl] =
       get_elementwise_nested_tensor_impl(self_contiguous, other_contiguous, op_name);
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(self_impl);
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(other_impl);
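This hunk does slightly more than swap the assignment: std::tie assigns to variables that already exist, so the impl pointers could be declared (and null-initialized) several lines before the call, while a structured binding always introduces new names at the point where the values are produced. The early declarations therefore disappear and the binding moves down to the call site. A small standalone sketch of that constraint (the types and the lookup helper are hypothetical):

#include <string>
#include <tuple>
#include <utility>

// Hypothetical helper producing two related values at once.
std::pair<int*, std::string> lookup() {
  static int slot = 7;
  return {&slot, "found"};
}

int main() {
  // std::tie style: the targets must already exist, so they can be declared
  // (and default-initialized) well before the call, with other code between.
  int* handle = nullptr;
  std::string status;
  std::tie(handle, status) = lookup();

  // Structured-binding style: the declaration itself creates the names,
  // so it has to sit at the call that produces the values.
  auto [handle2, status2] = lookup();

  return (handle2 == handle && status2 == status) ? 0 : 1;
}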
@@ -257,9 +254,7 @@ Tensor& NestedTensor_elementwise__Tensor(
     f(self_impl->get_buffer(), other);
     return self;
   }
-  NestedTensorImpl* self_impl = nullptr;
-  NestedTensorImpl* other_impl = nullptr;
-  std::tie(self_impl, other_impl) =
+  auto [self_impl, other_impl] =
       get_elementwise_nested_tensor_impl(self, other, op_name);
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(self_impl);
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(other_impl);
@@ -795,9 +795,7 @@ Tensor view_nested(const Tensor& self, IntArrayRef proposed_shape) {
   // reshaping underlying tensor dimensions does not change offset
   // determine reshaped size and stride
   const Tensor& sizemat = self_ptr->get_nested_sizes();
-  bool viewable;
-  Tensor sizemat_reshaped, stridemat_reshaped;
-  std::tie(viewable, sizemat_reshaped, stridemat_reshaped) = NestedTensor_compute_size_stride(
+  auto [viewable, sizemat_reshaped, stridemat_reshaped] = NestedTensor_compute_size_stride(
       sizes, strides, proposed_shape, sizemat.options());
   TORCH_CHECK(
       viewable,
@@ -888,9 +886,7 @@ Tensor reshape_nested(const Tensor& self, IntArrayRef proposed_shape) {
   // reshaping underlying tensor dimensions does not change offset
   // determine reshaped size and stride
   const Tensor& sizemat = self_ptr->get_nested_sizes();
-  bool viewable{false};
-  Tensor sizemat_reshaped, stridemat_reshaped;
-  std::tie(viewable, sizemat_reshaped, stridemat_reshaped) = NestedTensor_compute_size_stride(
+  auto [viewable, sizemat_reshaped, stridemat_reshaped] = NestedTensor_compute_size_stride(
       sizes, strides, proposed_shape, sizemat.options());
   if (viewable) {
     return self.view(proposed_shape);
@@ -424,12 +424,7 @@ void cuda_sparse_coo_softmax(
   auto out_values_2 = out_values.view({nnz, nvalues});
   auto out_values_accessor = out_values_2.packed_accessor64<scalar_t, 2>();

-  Tensor sorted_indices;
-  Tensor pool_offsets;
-  Tensor pool_sizes;
-  Tensor mx_buffer;
-
-  std::tie(sorted_indices, pool_offsets, pool_sizes, mx_buffer) =
+  auto [sorted_indices, pool_offsets, pool_sizes, mx_buffer] =
       compute_pool_max<scalar_t, true>(indices, values_2, sizes, nvalues, dim);

   auto pool_size = pool_offsets.size(0);
@@ -443,12 +438,12 @@ void cuda_sparse_coo_softmax(
   if (nvalues > 0 && pool_size > 0) {
     cuda_sparse_coo_softmax_kernel<scalar_t, LogSoftMax>
         <<<grid_size, block_size, 0, stream>>>(
-            sorted_indices.data_ptr<int64_t>(),
+            sorted_indices.template data_ptr<int64_t>(),
             pool_size,
-            pool_sizes.data_ptr<int64_t>(),
-            pool_offsets.data_ptr<int64_t>(),
+            pool_sizes.template data_ptr<int64_t>(),
+            pool_offsets.template data_ptr<int64_t>(),
             nvalues,
-            mx_buffer.data_ptr<scalar_t>(),
+            mx_buffer.template data_ptr<scalar_t>(),
             values_accessor,
             out_values_accessor);
     C10_CUDA_KERNEL_LAUNCH_CHECK();
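A side effect visible in this hunk: the kernel arguments gain the template keyword before data_ptr. My reading (an assumption, not stated in the commit message) is that sorted_indices and friends were previously declared with the concrete type Tensor, whereas the structured binding deduces their types from compute_pool_max<scalar_t, ...>(...), an expression that depends on the enclosing template parameter; calling the member function template data_ptr<int64_t>() on such a dependent object then needs the template disambiguator. A generic sketch of the rule with hypothetical types:

#include <tuple>

// Hypothetical Tensor-like object exposing a member function template.
struct Buffer {
  template <typename T>
  T* data_ptr() { return nullptr; }
};

// The dependent template argument T makes calls to this function
// type-dependent inside another template.
template <typename T>
std::tuple<Buffer, Buffer> make_buffers() { return {}; }

template <typename T>
void use_buffers() {
  Buffer b;            // concrete, non-dependent type:
  b.data_ptr<int>();   // no disambiguator needed

  auto [x, y] = make_buffers<T>();  // bindings have a dependent type
  x.template data_ptr<int>();       // 'template' tells the parser that
  y.template data_ptr<int>();       // data_ptr names a member template
}

int main() { use_buffers<float>(); }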
@@ -557,13 +552,9 @@ void cuda_sparse_coo_softmax_backward(
       thrust_ptr(out_offsets.data_ptr<int64_t>()) + out_offsets.size(0),
       thrust_ptr(lower_bound_values.data_ptr<int64_t>()));

-  Tensor sorted_indices;
-  Tensor pool_offsets;
-  Tensor pool_sizes;
-
   /* Compute independent pools of indices */
-  std::tie(
-      sorted_indices, pool_offsets, pool_sizes, std::ignore) =
+  auto [
+      sorted_indices, pool_offsets, pool_sizes, _] =
       compute_pool_max<scalar_t, false>(
           out_indices, values_2, sizes, nvalues, dim);

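This is the one place where the rewrite is not a pure one-for-one swap: std::tie could drop the fourth tuple element with std::ignore, but a structured binding has to bind every element, so the discarded slot becomes a placeholder binding named _. A standalone sketch of the two styles (compute_values is a hypothetical stand-in for compute_pool_max):

#include <tuple>

// Hypothetical helper returning four values, of which the caller needs three.
std::tuple<int, int, int, int> compute_values() {
  return {1, 2, 3, 4};
}

int main() {
  // std::tie can discard an element explicitly with std::ignore.
  int a = 0, b = 0, c = 0;
  std::tie(a, b, c, std::ignore) = compute_values();

  // A structured binding must name every element, so the unwanted slot
  // becomes a throwaway binding (the diff calls it "_"); some compilers
  // may warn that it is unused.
  auto [x, y, z, _] = compute_values();

  return (a + b + c) - (x + y + z);  // 0
}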
@@ -575,10 +566,10 @@ void cuda_sparse_coo_softmax_backward(
   if (nvalues > 0 && pool_size > 0) {
     cuda_sparse_coo_softmax_backward_kernel<scalar_t, LogSoftMax>
         <<<grid_size, block_size, 0, stream>>>(
-            sorted_indices.data_ptr<int64_t>(),
+            sorted_indices.template data_ptr<int64_t>(),
             pool_size,
-            pool_sizes.data_ptr<int64_t>(),
-            pool_offsets.data_ptr<int64_t>(),
+            pool_sizes.template data_ptr<int64_t>(),
+            pool_offsets.template data_ptr<int64_t>(),
             nvalues,
             grad_nnz,
             grad_offsets.data_ptr<int64_t>(),
@@ -36,9 +36,7 @@ def is_tensor_list(typ: Type) -> bool:

 def unwrap_tensor(name: str, cur_level_var: str) -> list[str]:
     result = f"""\
-Tensor {name}_value;
-optional<int64_t> {name}_bdim;
-std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}, {cur_level_var});"""
+auto [{name}_value, {name}_bdim] = unwrapTensorAtLevel({name}, {cur_level_var});"""
     return textwrap.dedent(result).split("\n")
