From c141f28b648ee3c6cb0a7286f0aa100297417e74 Mon Sep 17 00:00:00 2001
From: albanD
Date: Wed, 19 Oct 2022 20:56:37 +0000
Subject: [PATCH] Fix compilation warning and spurious print (#87297)

Fixes a compilation warning, makes this warning an error, and removes a spurious print.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/87297
Approved by: https://github.com/malfet
---
 CMakeLists.txt                                   | 1 +
 torch/csrc/jit/runtime/graph_executor.cpp        | 6 +++---
 torch/csrc/jit/serialization/unpickler.cpp       | 2 +-
 torch/csrc/profiler/standalone/nvtx_observer.cpp | 2 +-
 torch/csrc/profiler/util.cpp                     | 2 +-
 torch/utils/data/datapipes/gen_pyi.py            | 1 -
 6 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3442cbbe141..dae1dd4bc14 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -826,6 +826,7 @@ if(NOT MSVC)
   append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-missing-field-initializers" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-type-limits" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-array-bounds" CMAKE_CXX_FLAGS)
diff --git a/torch/csrc/jit/runtime/graph_executor.cpp b/torch/csrc/jit/runtime/graph_executor.cpp
index 0e121da32fc..c2c84eb9e4e 100644
--- a/torch/csrc/jit/runtime/graph_executor.cpp
+++ b/torch/csrc/jit/runtime/graph_executor.cpp
@@ -133,7 +133,7 @@ struct CaptureList {
       auto tensors = val.toTensorList();
       sizes_.push_back(tensors.size());
 
-      for (const at::Tensor tensor : tensors) {
+      for (const auto& tensor : tensors) {
         captureTensor(tensor, is_output);
       }
     } else {
@@ -326,7 +326,7 @@ struct DifferentiableGraphBackward : public autograd::Node {
   void addOutputForIValue(const IValue& value) {
     if (value.isTensorList()) {
       input_tensor_lists_.insert({index_, value.toTensorList().size()});
-      for (const at::Tensor tensor : value.toTensorList()) {
+      for (const at::Tensor& tensor : value.toTensorList()) {
         addOutputForTensor(tensor);
         index_++;
       }
@@ -357,7 +357,7 @@ struct DifferentiableGraphBackward : public autograd::Node {
     if (v.isTensorList()) {
       auto tensors = v.toTensorList();
       input_instructions_.pushTensorList(tensors.size());
-      for (const at::Tensor tensor : tensors) {
+      for (const at::Tensor& tensor : tensors) {
         addInputVariable(tensor);
       }
     } else if (v.isTensor()) {
diff --git a/torch/csrc/jit/serialization/unpickler.cpp b/torch/csrc/jit/serialization/unpickler.cpp
index 3a0c3c85009..7b40f138c60 100644
--- a/torch/csrc/jit/serialization/unpickler.cpp
+++ b/torch/csrc/jit/serialization/unpickler.cpp
@@ -129,7 +129,7 @@ void restoreAccurateTypeTags(const IValue& root, const TypePtr& type_tag) {
         auto elem_type = w.type->containedType(0);
         auto lst = w.value.toList();
         lst.unsafeSetElementType(elem_type);
-        for (const IValue item : lst) {
+        for (const IValue& item : lst) {
           Work elem = {elem_type, item};
           to_process.emplace_back(std::move(elem));
         }
diff --git a/torch/csrc/profiler/standalone/nvtx_observer.cpp b/torch/csrc/profiler/standalone/nvtx_observer.cpp
index a964f5fb493..1db70a543bc 100644
--- a/torch/csrc/profiler/standalone/nvtx_observer.cpp
+++ b/torch/csrc/profiler/standalone/nvtx_observer.cpp
@@ -71,7 +71,7 @@ std::list<std::pair<at::RecordFunctionHandle, int>> flattenOpIdList(
   std::list<std::pair<at::RecordFunctionHandle, int>> input_op_id_list;
   auto state_ptr = NVTXThreadLocalState::getTLS();
   TORCH_INTERNAL_ASSERT(state_ptr, "Expected profiler state set");
-  for (const c10::IValue input : list) {
+  for (const c10::IValue& input : list) {
     if (input.isTensor()) {
       const at::Tensor& tensor = input.toTensor();
       auto producer_op_pair = state_ptr->getOpIdFromInput(tensor);
diff --git a/torch/csrc/profiler/util.cpp b/torch/csrc/profiler/util.cpp
index f8821e0c649..08a20c84805 100644
--- a/torch/csrc/profiler/util.cpp
+++ b/torch/csrc/profiler/util.cpp
@@ -198,7 +198,7 @@ std::vector<std::vector<int64_t>> flattenList(
     c10::List<c10::IValue> list,
     std::string fn_name) {
   std::vector<std::vector<int64_t>> tensor_dims;
-  for (const c10::IValue input : list) {
+  for (const c10::IValue& input : list) {
     if (input.isTensor()) {
       const at::Tensor& tensor = input.toTensor();
       if (tensor.defined()) {
diff --git a/torch/utils/data/datapipes/gen_pyi.py b/torch/utils/data/datapipes/gen_pyi.py
index dee545a5e50..e89031075cf 100644
--- a/torch/utils/data/datapipes/gen_pyi.py
+++ b/torch/utils/data/datapipes/gen_pyi.py
@@ -231,5 +231,4 @@ def main() -> None:
 
 
 if __name__ == '__main__':
-    print("Generating Python interface file 'datapipe.pyi'...")
     main()