Fix some issues detected by static analysis tools (#131989)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/131989
Approved by: https://github.com/ezyang
Authored by cyy on 2024-08-02 04:18:55 +00:00, committed by PyTorch MergeBot
parent 5ea0f51187
commit 35d14d22a0
8 changed files with 22 additions and 44 deletions

View File

@@ -277,7 +277,7 @@ static void fill__Tensor_batch_rule(
self.fill_(other);
return;
}
- if (!self_bdim && other_bdim) {
+ if (!self_bdim) {
vmapIncompatibleInplaceError("fill_");
}
auto self_and_other = _binary_pointwise_helper(
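The hunk above drops the `&& other_bdim` half of the condition. Assuming the elided guard on the early return checks that `other` carries no batch dimension (which the `self.fill_(other)` fast path suggests), `other_bdim` is always set by the time the second `if` runs, so the extra test was dead. A minimal standalone sketch of that control flow, with hypothetical names in place of the functorch types:

#include <optional>
#include <stdexcept>

// Sketch only: std::optional<int> stands in for the optional batch-dim
// arguments of the real batch rule, and the exception stands in for
// vmapIncompatibleInplaceError.
void fill_rule_sketch(std::optional<int> self_bdim, std::optional<int> other_bdim) {
  if (!other_bdim) {
    return;  // fast path: other has no batch dimension, a plain fill_ suffices
  }
  // Past the early return, other_bdim is known to be engaged, so the old
  // "!self_bdim && other_bdim" condition reduces to "!self_bdim".
  if (!self_bdim) {
    throw std::runtime_error("fill_ is vmap-incompatible here");
  }
}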

View File

@@ -20,7 +20,6 @@
#include <caffe2/perfkernels/embedding_lookup_idx.h>
#endif
#include <algorithm>
#include <cstring>
#include <tuple>
#include <utility>
@@ -123,7 +122,6 @@ index_select_add(
auto* select_indices_data = select_indices.const_data_ptr<index_t>();
auto* src_data = src.const_data_ptr<data_t>();
auto* output_data = output.data_ptr<data_t>();
- // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
index_t* bag_size_data = nullptr;
if (bag_size.defined()) {
bag_size_data = bag_size.data_ptr<index_t>();
@@ -148,9 +146,8 @@ index_select_add(
at::native::cpublas::axpy<data_t>(ddim, 1,
src_data + src_stride0 * idx, src_stride1,
output_data + output_stride0 * add_indices_data[i], output_stride1);
- } else if (bag_size.defined()) {
+ } else if (bag_size_data) {
// Decrement bag_size to reflect that the index is padded
- // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
bag_size_data[add_indices_data[i]]--;
}
}
@@ -312,7 +309,6 @@ index_select_add(
TORCH_CHECK(select_indices.numel() == add_indices.numel());
auto* src_data = src.const_data_ptr<data_t>();
auto* add_indices_data = add_indices.const_data_ptr<index_t>();
- // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
index_t* bag_size_data = nullptr;
if (bag_size.defined()) {
bag_size_data = bag_size.data_ptr<index_t>();
@@ -354,9 +350,8 @@ index_select_add(
output_data_fp32 + ddim * add_indices_data[i],
1);
- } else if (bag_size.defined()) {
+ } else if (bag_size_data) {
// Decrement bag_size to reflect that the index is padded
- // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
bag_size_data[add_indices_data[i]]--;
}
}
@@ -459,7 +454,6 @@ index_select_add(const Tensor &select_indices,
AT_ASSERT(select_indices.numel() == add_indices.numel());
auto* src_data = src.const_data_ptr<float>();
auto* add_indices_data = add_indices.const_data_ptr<index_t>();
- // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
index_t* bag_size_data = nullptr;
if (bag_size.defined()) {
bag_size_data = bag_size.data_ptr<index_t>();
@@ -486,9 +480,8 @@ index_select_add(const Tensor &select_indices,
src_stride1,
output_data + output_stride0 * add_indices_data[i],
output_stride1);
- } else if (bag_size.defined()) {
+ } else if (bag_size_data) {
// Decrement bag_size to reflect that the index is padded
- // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
bag_size_data[add_indices_data[i]]--;
}
}
@@ -517,7 +510,6 @@ index_select_scale_add(
auto* select_indices_data = select_indices.const_data_ptr<index_t>();
auto* src_data = src.const_data_ptr<data_t>();
auto* output_data = output.data_ptr<data_t>();
- // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
index_t* bag_size_data = nullptr;
if (bag_size.defined()) {
bag_size_data = bag_size.data_ptr<index_t>();
@@ -548,9 +540,8 @@ index_select_scale_add(
for (const auto j : c10::irange(ddim)) {
output_base[j * output_stride1] += src_base[j * src_stride1] * scale;
}
- } else if (bag_size.defined()) {
+ } else if (bag_size_data) {
// Decrement bag_size to reflect that the index is padded
- // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
bag_size_data[add_indices_data[i]]--;
}
}
@@ -701,7 +692,6 @@ index_select_scale_add(
AT_ASSERT(select_indices.numel() == add_indices.numel());
auto* src_data = src.const_data_ptr<data_t>();
auto* add_indices_data = add_indices.const_data_ptr<index_t>();
- // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
index_t* bag_size_data = nullptr;
if (bag_size.defined()) {
bag_size_data = bag_size.data_ptr<index_t>();
@@ -735,9 +725,8 @@ index_select_scale_add(
output_base_fp32[j] += static_cast<float>(src_base[j * src_stride1]) *
static_cast<float>(scale);
}
- } else if (bag_size.defined()) {
+ } else if (bag_size_data) {
// Decrement bag_size to reflect that the index is padded
- // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
bag_size_data[add_indices_data[i]]--;
}
}
@@ -840,7 +829,6 @@ index_select_scale_add(const Tensor &select_indices,
AT_ASSERT(select_indices.numel() == add_indices.numel());
auto* src_data = src.const_data_ptr<float>();
auto* add_indices_data = add_indices.const_data_ptr<index_t>();
- // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
index_t* bag_size_data = nullptr;
if (bag_size.defined()) {
bag_size_data = bag_size.data_ptr<index_t>();
@@ -869,9 +857,8 @@ index_select_scale_add(const Tensor &select_indices,
for (const auto j : c10::irange(ddim)) {
output_base[j * output_stride1] += src_base[j * src_stride1] * scale;
}
- } else if (bag_size.defined()) {
+ } else if (bag_size_data) {
// Decrement bag_size to reflect that the index is padded
- // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
bag_size_data[add_indices_data[i]]--;
}
}
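Every `index_select_add` / `index_select_scale_add` hunk above makes the same two changes: the `= nullptr` initializer makes the `cppcoreguidelines-init-variables` suppressions unnecessary, and testing the pointer `bag_size_data` instead of re-testing `bag_size.defined()` puts the non-null fact right next to the dereference, so the `clang-analyzer-core.NullDereference` suppressions can go too. A standalone sketch of the idea, with a `std::vector<int>` standing in for the `bag_size` tensor:

#include <cstdio>
#include <vector>

// Sketch only: have_bag_sizes plays the role of bag_size.defined().
void decrement_padded_bag(std::vector<int>& bag_sizes, bool have_bag_sizes, int bag) {
  int* bag_size_data = nullptr;  // initialized, so no init-variables suppression needed
  if (have_bag_sizes) {
    bag_size_data = bag_sizes.data();
  }
  // Before: guarding with `have_bag_sizes` again forces the analyzer to relate
  // two separate conditions, and it still suspects a null dereference.
  // After: testing the pointer itself makes the dereference provably safe.
  if (bag_size_data) {
    bag_size_data[bag]--;
    std::printf("bag %d shrank to %d\n", bag, bag_size_data[bag]);
  }
}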

View File

@@ -79,7 +79,7 @@ std::tuple<Tensor, Tensor> fake_quantize_per_channel_affine_cachemask(
auto Y = at::empty_like(self, self.options(), MemoryFormat::Preserve);
auto mask = at::empty_like(self, at::kBool, MemoryFormat::Preserve);
- std::vector<int64_t> expected_shape(self.dim(), 1);
+ c10::DimVector expected_shape(self.dim(), 1);
expected_shape[axis] = self.size(axis);
TensorIterator iter = TensorIteratorConfig()
@@ -214,18 +214,17 @@ std::tuple<Tensor, Tensor, Tensor> _fake_quantize_learnable_per_channel_affine_b
auto dX = at::empty_like(X, X.options(), MemoryFormat::Preserve);
auto dScale_vec = at::empty_like(X, X.options(), MemoryFormat::Preserve);
auto dZeroPoint_vec = at::empty_like(X, X.options(), MemoryFormat::Preserve);
- int numDimensions = X.ndimension();
+ auto numDimensions = X.ndimension();
// Create an axis mask for vectorizing and reshaping the scale and zero point tensors
// into the same shapes as X along the channel axis.
- // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
- int64_t* axis_mask = (int64_t *) calloc(numDimensions, sizeof(int64_t));
+ c10::DimVector axis_mask(numDimensions);
for (const auto i : c10::irange(numDimensions)) {
axis_mask[i] = (i == axis) ? X.size(axis) : 1;
}
auto X_shape = X.sizes();
- auto scale_vectorized = scale.reshape(at::IntArrayRef(axis_mask, numDimensions)).expand(X_shape);
- auto zero_point_vectorized = zero_point_rounded.reshape(at::IntArrayRef(axis_mask, numDimensions)).expand(X_shape);
+ auto scale_vectorized = scale.reshape(at::IntArrayRef(axis_mask.data(), numDimensions)).expand(X_shape);
+ auto zero_point_vectorized = zero_point_rounded.reshape(at::IntArrayRef(axis_mask.data(), numDimensions)).expand(X_shape);
auto iter = TensorIteratorConfig()
.add_output(dX)
@@ -244,8 +243,7 @@ std::tuple<Tensor, Tensor, Tensor> _fake_quantize_learnable_per_channel_affine_b
// Create a collection of axes that include all but the channel axis for
// reduction when summing over the dScale and dZeroPoint tensors.
- // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
- int64_t* axis_for_reduction = (int64_t*) calloc(numElements, sizeof(int64_t));
+ c10::DimVector axis_for_reduction(numElements);
for (const auto i : c10::irange(axis)) {
axis_for_reduction[i] = i;
}
@@ -253,13 +251,9 @@ std::tuple<Tensor, Tensor, Tensor> _fake_quantize_learnable_per_channel_affine_b
axis_for_reduction[i] = i + 1;
}
- auto dScale = dScale_vec.sum(at::IntArrayRef(axis_for_reduction, numElements));
- auto dZeroPoint = dZeroPoint_vec.sum(at::IntArrayRef(axis_for_reduction, numElements));
+ auto dScale = dScale_vec.sum(at::IntArrayRef(axis_for_reduction.data(), numElements));
+ auto dZeroPoint = dZeroPoint_vec.sum(at::IntArrayRef(axis_for_reduction.data(), numElements));
- // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
- free(axis_mask);
- // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
- free(axis_for_reduction);
return std::make_tuple(dX, dScale, dZeroPoint);
}
} // namespace native
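The changes in this file swap heap bookkeeping for `c10::DimVector`: `expected_shape`, `axis_mask` and `axis_for_reduction` become RAII containers, the `cppcoreguidelines-no-malloc` suppressions and the trailing `free()` calls disappear, and `.data()` hands the same raw pointer to `at::IntArrayRef`. A standalone sketch of the pattern, using `std::vector<int64_t>` as a stand-in for `c10::DimVector` (assumed here to be a small-buffer vector exposing `data()` and `size()`):

#include <cstdint>
#include <vector>

// Sketch only: build the per-dimension mask that is 1 everywhere except the
// channel axis, the way axis_mask is built above.
std::vector<int64_t> make_axis_mask(int64_t ndim, int64_t axis, int64_t axis_size) {
  // Before: int64_t* mask = (int64_t*)calloc(ndim, sizeof(int64_t)); ... free(mask);
  // After: the container owns the storage, so nothing leaks on any return path.
  std::vector<int64_t> mask(ndim, 1);
  mask[axis] = axis_size;
  return mask;  // callers pass {mask.data(), mask.size()} wherever a raw span is expected
}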

View File

@@ -3,7 +3,6 @@
#include <c10/util/irange.h>
#include <cmath>
#include <tuple>
#include <vector>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
@@ -76,8 +75,7 @@ std::tuple<at::Tensor, at::Tensor> choose_qparams_fake_quant(
float* x_max_data = inp_running_max.data_ptr<float>();
for (const auto i : c10::irange(inp_running_min.numel())) {
#ifdef USE_FBGEMM
- fbgemm::TensorQuantizationParams x_qparams{};
- x_qparams = fbgemm::ChooseQuantizationParams(
+ auto x_qparams = fbgemm::ChooseQuantizationParams(
x_min_data[i],
x_max_data[i],
qmin,
@@ -88,8 +86,7 @@
scale[i] = x_qparams.scale;
zero_point[i] = x_qparams.zero_point;
#else
- quant_utils::TensorQuantizationParams x_qparams{};
- x_qparams = quant_utils::ChooseQuantizationParams(
+ auto x_qparams = quant_utils::ChooseQuantizationParams(
x_min_data[i],
x_max_data[i],
qmin,
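Both the FBGEMM branch and the fallback now build `x_qparams` directly from the `ChooseQuantizationParams` call instead of default-constructing it and assigning a moment later, which is the redundant-initialization pattern the analyzer was pointing at. A minimal sketch with hypothetical stand-ins for the params struct and factory:

// Sketch only: QuantParams and choose_params are hypothetical stand-ins for
// TensorQuantizationParams and ChooseQuantizationParams.
struct QuantParams {
  double scale = 1.0;
  int zero_point = 0;
};

QuantParams choose_params(float min_val, float max_val) {
  // placeholder math: affine parameters for an 8-bit range
  return {static_cast<double>(max_val - min_val) / 255.0, 0};
}

void choose_params_example() {
  // Before: QuantParams x_qparams{}; x_qparams = choose_params(0.f, 1.f);
  // After: one construction, no throwaway default state.
  auto x_qparams = choose_params(0.f, 1.f);
  (void)x_qparams;
}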

View File

@@ -33,8 +33,7 @@ struct UsageStream {
UsageStream() = default;
UsageStream(cudaStream_t s, c10::DeviceIndex d) : stream(s), device(d) {}
UsageStream(const UsageStream& us) = default;
- UsageStream(const UsageStream&& us) noexcept
-     : stream(us.stream), device(us.device) {}
+ UsageStream(UsageStream&& us) noexcept = default;
UsageStream& operator=(const UsageStream& other) = default;
UsageStream& operator=(UsageStream&& other) noexcept = default;
};
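The replaced constructor took `const UsageStream&&`: a const rvalue reference cannot be moved from, so the overload behaved as a hand-written copy, which is the kind of thing clang-tidy's move-constructor checks complain about. Defaulting a genuine `UsageStream&&` overload keeps the type cheap to move and `noexcept`. A standalone sketch with a stand-in struct of the same shape:

#include <utility>

// Sketch only: StreamHandle mirrors the two members of UsageStream.
struct StreamHandle {
  void* stream = nullptr;
  int device = -1;

  StreamHandle() = default;
  StreamHandle(void* s, int d) : stream(s), device(d) {}
  StreamHandle(const StreamHandle&) = default;
  // A `const StreamHandle&&` overload could only copy the members; defaulting
  // the non-const rvalue overload gives a real, noexcept move.
  StreamHandle(StreamHandle&&) noexcept = default;
  StreamHandle& operator=(const StreamHandle&) = default;
  StreamHandle& operator=(StreamHandle&&) noexcept = default;
};

void stream_handle_example() {
  StreamHandle a(nullptr, 0);
  StreamHandle b(std::move(a));  // picks the defaulted move constructor
  (void)b;
}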

View File

@@ -120,7 +120,7 @@ Tensor& detach_(Tensor& self) {
self.detach_();
}
- if (jit::tracer::isTracing()) {
+ if (jit::tracer::isTracing() && node) {
jit::tracer::addOutput(node, self);
}
return self;
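The added `&& node` makes the null case explicit: `node` is presumably only created earlier in `detach_` when tracing is active, and the analyzer cannot prove that relationship across the two `isTracing()` calls, so guarding on the pointer itself removes the reported null-dereference path. A generic sketch of the guard (all names here are hypothetical stand-ins for the tracer machinery):

// Sketch only: Node and add_output stand in for the tracer types;
// tracing_enabled models jit::tracer::isTracing().
struct Node {};

void add_output(Node* /*node*/, int /*tensor*/) {}

void record_output_sketch(int self, bool tracing_enabled, Node* node) {
  // node may still be null, e.g. when it was only created under tracing.
  if (tracing_enabled && node) {  // the added "&& node" keeps add_output null-safe
    add_output(node, self);
  }
}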

View File

@@ -130,6 +130,7 @@ at::Tensor LazyNativeFunctions::_copy_from_and_resize(
// at this point we know dst is a lazy tensor
auto* dest_impl =
dynamic_cast<torch::lazy::LTCTensorImpl*>(dst.unsafeGetTensorImpl());
+ TORCH_CHECK(dest_impl);
dest_impl->tensor()->UpdateFromTensorOut(self_tensor);
dest_impl->force_refresh_sizes();
}
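`dynamic_cast` to a pointer type returns `nullptr` when the runtime type does not match, so the added `TORCH_CHECK(dest_impl)` turns a would-be null dereference on the next line into a diagnosable failure; per the comment above it should only trip if the "dst is a lazy tensor" invariant is broken. A standalone sketch of the same guard:

#include <stdexcept>

// Sketch only: Base/LazyImpl stand in for TensorImpl/LTCTensorImpl.
struct Base {
  virtual ~Base() = default;
};
struct LazyImpl : Base {
  void update() {}
};

void copy_into_lazy(Base* dst_impl) {
  auto* dest = dynamic_cast<LazyImpl*>(dst_impl);
  if (!dest) {  // plays the role of TORCH_CHECK(dest_impl)
    throw std::runtime_error("expected a lazy tensor implementation");
  }
  dest->update();  // safe: dest is known to be non-null here
}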

View File

@@ -975,7 +975,7 @@ static Tensor sparse_compressed_tensor_ctor_worker(
(required_layout
? r.layoutWithDefault(ARG_LAYOUT, required_layout.value())
: r.layoutOptional(ARG_LAYOUT));
- if (required_layout && layout) {
+ if (required_layout) {
TORCH_CHECK(
layout.value() == required_layout.value(),
name,
@@ -1031,7 +1031,7 @@ static Tensor sparse_compressed_tensor_ctor_worker(
(required_layout
? r.layoutWithDefault(ARG_LAYOUT1, required_layout.value())
: r.layoutOptional(ARG_LAYOUT1));
- if (required_layout && layout) {
+ if (required_layout) {
TORCH_CHECK(
layout.value() == required_layout.value(),
name,
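In both hunks `layout` comes from the ternary directly above: when `required_layout` has a value, the `layoutWithDefault(..., required_layout.value())` branch runs and always produces an engaged optional, so the extra `&& layout` test was dead, and dropping it also lets the analyzer see that the `layout.value()` inside the `TORCH_CHECK` is safe. A sketch of that reasoning with plain `std::optional` (the helper names are stand-ins for the python_arg_parser calls):

#include <cassert>
#include <optional>

// Sketch only: with_default/parsed_layout mimic layoutWithDefault/layoutOptional.
std::optional<int> with_default(int fallback) { return fallback; }  // never empty
std::optional<int> parsed_layout() { return std::nullopt; }         // may be empty

void check_layout_sketch(std::optional<int> required_layout) {
  auto layout = required_layout ? with_default(*required_layout) : parsed_layout();
  if (required_layout) {
    // layout came from with_default here, so it is guaranteed to be engaged;
    // that is why the old "&& layout" test could be dropped.
    assert(layout.has_value());
    // ...the real code then checks layout.value() == required_layout.value()...
  }
}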