set -Wsuggest-override for builds (#89852)

Stack created with [Sapling](https://sapling-scm.com). Best reviewed with [ReviewStack](https://reviewstack.dev/pytorch/pytorch/pull/89852).
* __->__ #89852
* #89851

set -Wsuggest-override for builds

Summary: This was flagged by a Meta internal build.

Test Plan: Rely on CI.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/89852
Approved by: https://github.com/malfet
mikey dagitses 2022-12-19 22:08:44 +00:00 committed by PyTorch MergeBot
parent 8ecb49b8fb
commit 322e4b4c8a
24 changed files with 96 additions and 46 deletions
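For readers unfamiliar with the flag: `-Wsuggest-override` makes GCC and Clang warn whenever a member function overrides a virtual function from a base class but is not marked `override` (or `final`). A minimal sketch, not taken from this PR, of what the warning catches:

```cpp
// Compile with e.g.: g++ -std=c++17 -Wsuggest-override -c suggest_override.cpp
struct Base {
  virtual void run();
  virtual ~Base() = default;
};

struct Derived : Base {
  // warning: 'virtual void Derived::run()' can be marked override
  void run();

  // Fixed spelling; the compiler now also verifies that a matching
  // virtual function really exists in Base.
  // void run() override;
};
```

The hunks below fall into two groups: adding the missing `override`/`final` specifiers to PyTorch's own code, and wrapping third-party includes (cudnn_frontend, ONNX, LLVM) that would otherwise emit the warning.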

View File

@@ -848,6 +848,12 @@ if(NOT MSVC)
# Suppress "The ABI for passing parameters with 64-byte alignment has changed in GCC 4.6"
string(APPEND CMAKE_CXX_FLAGS " -Wno-psabi")
endif()
if(NOT CMAKE_COMPILER_IS_GNUCXX OR GCC_VERSION VERSION_GREATER_EQUAL 9.2)
# Prior to GCC 9.2, this warning misfires when a method is
# labeled "final".
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78010
append_cxx_flag_if_supported("-Wsuggest-override" CMAKE_CXX_FLAGS)
endif()
# Use ld.gold if available, fall back to ld.bfd (the default ld) if not
if(USE_GOLD_LINKER)
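The version guard above exists because of the GCC bug cited in the comment: prior to GCC 9.2, `-Wsuggest-override` also fires on methods that are already marked `final`. A rough sketch of that false positive, assuming a pre-9.2 GCC (gcc.gnu.org bug 78010):

```cpp
struct Base {
  virtual void f();
  virtual ~Base() = default;
};

struct Derived : Base {
  // GCC < 9.2: "can be marked override", even though 'final' already
  // documents that this function overrides (and seals) Base::f.
  // GCC >= 9.2 accepts it silently.
  void f() final;
};
```

Note also that `append_cxx_flag_if_supported` only adds the flag when the compiler accepts it, so this block is a no-op for compilers without the warning.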

View File

@@ -1,4 +1,5 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/cuda/CUDAConfig.h> // for the definition of AT_CUDNN_ENABLED
#if AT_CUDNN_ENABLED()
@@ -8,7 +9,13 @@
#if HAS_CUDNN_V8()
#include <ATen/cudnn/cudnn-wrapper.h>
#include <c10/macros/Macros.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
#include <cudnn_frontend.h>
C10_DIAGNOSTIC_POP()
#include <cudnn_frontend_find_plan.h>
#include <cudnn_frontend_get_plan.h>
#include <ATen/core/Tensor.h>

View File

@@ -17,7 +17,10 @@ This file contains some of the auxiliary functions used by both Conv.cpp & Linea
#include <ATen/native/quantized/PackedParams.h>
#include <c10/core/QScheme.h>
#include <c10/util/ArrayRef.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
#include <cudnn_frontend.h>
C10_DIAGNOSTIC_POP()
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
@@ -118,7 +121,7 @@ struct PackedConvWeightCudnn : public ConvPackedParamsBase<kSpatialDim> {
at::Tensor apply_dynamic(
const at::Tensor& input,
bool reduce_range) {
bool reduce_range) override {
TORCH_CHECK(false, "apply_dynamic is currently not reported");
}

View File

@@ -512,9 +512,10 @@ __device__ __attribute__((noinline)) __attribute__((weak)) void __assert_fail(
#endif
#endif // HAS_DEMANGLE
#ifdef __clang__
#define _C10_PRAGMA__(string) _Pragma(#string)
#define _C10_PRAGMA_(string) _C10_PRAGMA__(string)
#ifdef __clang__
#define C10_CLANG_DIAGNOSTIC_PUSH() _Pragma("clang diagnostic push")
#define C10_CLANG_DIAGNOSTIC_POP() _Pragma("clang diagnostic pop")
#define C10_CLANG_DIAGNOSTIC_IGNORE(flag) \
@@ -527,4 +528,29 @@ __device__ __attribute__((noinline)) __attribute__((weak)) void __assert_fail(
#define C10_CLANG_HAS_WARNING(flag) 0
#endif
#ifdef __clang__
#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \
_C10_PRAGMA_(clang diagnostic push) \
_C10_PRAGMA_(clang diagnostic ignored "-Wunknown-warning-option") \
_C10_PRAGMA_(clang diagnostic ignored warning)
#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(clang diagnostic pop)
#elif __GNUC__
#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \
_C10_PRAGMA_(GCC diagnostic push) \
_C10_PRAGMA_(GCC diagnostic ignored "-Wpragmas") \
_C10_PRAGMA_(GCC diagnostic ignored warning)
#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(GCC diagnostic pop)
#else
#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning)
#define C10_DIAGNOSTIC_POP()
#endif
#endif // C10_MACROS_MACROS_H_
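The new `C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED` / `C10_DIAGNOSTIC_POP` pair expands to compiler-specific push/ignore/pop pragmas on Clang and GCC and to nothing elsewhere. Ignoring `-Wunknown-warning-option` (Clang) or `-Wpragmas` (GCC) first is what makes the "if defined" part work: asking an older compiler to ignore a warning it does not know about would otherwise itself warn. The intended usage, as seen in the cuDNN, ONNX, and LLVM hunks in this PR, is to bracket a single third-party include:

```cpp
#include <c10/macros/Macros.h>

// Suppress -Wsuggest-override only for a header we do not control; the pop
// restores the warning for the rest of the translation unit.
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
#include <cudnn_frontend.h>
C10_DIAGNOSTIC_POP()
```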

View File

@@ -1050,12 +1050,10 @@ TEST(DataLoaderTest, MakeDataLoaderDefaultsAsExpected) {
}
struct UnsizedDataset : public datasets::Dataset<UnsizedDataset> {
// NOLINTNEXTLINE(cppcoreguidelines-explicit-virtual-functions,modernize-use-override)
torch::data::Example<> get(size_t i) {
torch::data::Example<> get(size_t i) override {
return {torch::ones(i), torch::ones(i)};
}
// NOLINTNEXTLINE(cppcoreguidelines-explicit-virtual-functions,modernize-use-override)
torch::optional<size_t> size() const noexcept {
torch::optional<size_t> size() const noexcept override {
return torch::nullopt;
}
};

View File

@@ -20,7 +20,7 @@ class DistAutogradTest : public ::testing::Test {
autogradContainer_ = &DistAutogradContainer::init(0);
}
virtual void TearDown() {
void TearDown() override {
autogradContainer_->releaseContext(
autogradContainer_->currentContext()->contextId());
}

View File

@@ -3115,8 +3115,7 @@ TEST(TestShapeGraphLinting, Basic) {
// fusion parameters
class Composed : public ::testing::Test {
public:
// NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
void SetUp() {
void SetUp() override {
torch::jit::tensorexpr::getTEMustUseLLVMOnCPU() = false;
}
};

View File

@@ -18,13 +18,12 @@ using namespace torch::jit::tensorexpr;
class GraphOpt : public ::testing::Test {
public:
// NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
void SetUp() {
void SetUp() override {
old_cat_wo_conditionals_ = getCatWoConditionals();
getCatWoConditionals() = true;
}
void TearDown() {
void TearDown() override {
getCatWoConditionals() = old_cat_wo_conditionals_;
}

View File

@@ -24,8 +24,7 @@ using namespace torch::jit::tensorexpr;
class Kernel : public ::testing::Test {
public:
// NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
void SetUp() {
void SetUp() override {
getTEMustUseLLVMOnCPU() = false;
}
};

View File

@@ -2292,8 +2292,7 @@ class LoopOrderHelper : public IRVisitor {
return ordering.str();
}
// NOLINTNEXTLINE(cppcoreguidelines-explicit-virtual-functions,modernize-use-override)
void visit(ForPtr v) {
void visit(ForPtr v) final {
ordering << v->var()->name_hint() << ",";
IRVisitor::visit(v);
}

View File

@@ -24,8 +24,7 @@ using namespace torch::jit::tensorexpr;
class Quantization : public ::testing::Test {
public:
// NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
void SetUp() {
void SetUp() override {
getTEMustUseLLVMOnCPU() = false;
}
};

View File

@@ -66,8 +66,8 @@ struct CUDAPluggableAllocator
void* malloc(size_t size, int device, cudaStream_t stream);
c10::DataPtr allocate(size_t size) const;
c10::DeleterFnPtr raw_deleter() const;
c10::DataPtr allocate(size_t size) const override;
c10::DeleterFnPtr raw_deleter() const override;
virtual void* raw_alloc(size_t nbytes) override;
virtual void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream)

View File

@@ -76,7 +76,7 @@ class PyProcessGroup : public ProcessGroup {
}
c10::intrusive_ptr<Work> barrier(
const BarrierOptions& opts = BarrierOptions()) {
const BarrierOptions& opts = BarrierOptions()) override {
PYBIND11_OVERRIDE(
c10::intrusive_ptr<Work>, /* Return type */
ProcessGroup, /* Parent class */

View File

@@ -1281,7 +1281,7 @@ class CudaKernelGenerator : private OptOutConstDispatch {
}
}
void handle(const LoadStoreOp* ldst) {
void handle(const LoadStoreOp* ldst) final {
// TODO:
// Need to gradually merge the code path of this
// with UnaryOp::Set for vectorization.
@@ -2629,7 +2629,7 @@ class CudaKernelGenerator : private OptOutConstDispatch {
indent() << "NVFUSER_UPDATE_MAGIC_ZERO\n";
}
void handle(const kir::Swizzle2DInt* swizzle_2d) {
void handle(const kir::Swizzle2DInt* swizzle_2d) final {
TORCH_INTERNAL_ASSERT(print_inline_);
TORCH_INTERNAL_ASSERT(
swizzle_2d->swizzleType() != Swizzle2DType::NoSwizzle,
@@ -2642,14 +2642,14 @@ class CudaKernelGenerator : private OptOutConstDispatch {
}
}
void handle(const kir::IntPair* int_pair) {
void handle(const kir::IntPair* int_pair) final {
const auto def = int_pair->definition();
TORCH_INTERNAL_ASSERT(
def != nullptr, "no support for un-inlined int pair yet.");
code_ << gen(def);
}
void handle(const kir::PairSelect* pair_select) {
void handle(const kir::PairSelect* pair_select) final {
if (print_inline_) {
code_ << gen(pair_select->in());
} else {

View File

@@ -339,7 +339,7 @@ struct ViewOpRecord : RecordFunctor {
fd.setFusionState(outputs_.at(0).index, output);
}
virtual void print(std::ostream& os, bool close_function = true) const {
void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
os << ", original_shape=[";
bool first_arg = true;
@@ -426,7 +426,7 @@ struct PermuteOpRecord : RecordFunctor {
fd.setFusionState(outputs_.at(0).index, output);
}
virtual void print(std::ostream& os, bool close_function = true) const {
void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
os << ", dims=[";
bool first_arg = true;
@@ -510,7 +510,7 @@ struct SqueezeOpRecord : RecordFunctor {
fd.setFusionState(outputs_.at(0).index, output);
}
virtual void print(std::ostream& os, bool close_function = true) const {
void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
os << ", original_shape=[";
bool first_arg = true;
@@ -662,7 +662,7 @@ struct BroadcastInDimOpRecord : RecordFunctor {
fd.setFusionState(outputs_.at(0).index, output);
}
virtual void print(std::ostream& os, bool close_function = true) const {
void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
os << ", output_shape=[";
bool first_arg = true;
@@ -748,7 +748,7 @@ struct BroadcastOpRecord : RecordFunctor {
fd.setFusionState(outputs_.at(0).index, output);
}
virtual void print(std::ostream& os, bool close_function = true) const {
void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
os << ", is_broadcast_dim=[";
bool first_arg = true;
@@ -844,7 +844,7 @@ struct CastOpRecord : RecordFunctor {
fd.setFusionState(outputs_.at(0).index, output);
}
virtual void print(std::ostream& os, bool close_function = true) const {
void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
os << ", dtype=" << dtypeToPyString(dtype_);
if (close_function) {
@@ -897,7 +897,7 @@ struct ConstantRecord : RecordFunctor {
fd.setFusionState(outputs_.at(0).index, output);
}
virtual void print(std::ostream& os, bool close_function = true) const {
void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
if (std::is_same<ValueType, bool>::value) {
os << (value_ ? "True" : "False");
@@ -1038,7 +1038,7 @@ struct TensorRecord : RecordFunctor {
fd.addInput(tv);
}
virtual void print(std::ostream& os, bool close_function = true) const {
void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
os << "symbolic_sizes=[";
bool first_arg = true;
@@ -1231,7 +1231,7 @@ struct ReductionOpRecord : RecordFunctor {
fd.setFusionState(outputs_.at(0).index, output);
}
virtual void print(std::ostream& os, bool close_function = true) const {
void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
os << ", axes=[";
bool first_arg = true;
@@ -1316,7 +1316,7 @@ struct ScalarRecord : RecordFunctor {
fd.setFusionState(outputs_.at(0).index, output);
}
virtual void print(std::ostream& os, bool close_function = true) const {
void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
os << "dtype=" << dtypeToPyString(dtype_);
if (close_function) {
@@ -1374,7 +1374,7 @@ struct NormOpRecord : RecordFunctor {
correction_(correction),
keep_dim_(keep_dim) {}
virtual ~NormOpRecord() = default;
virtual RecordFunctor* clone() = 0;
RecordFunctor* clone() override = 0;
// I am skipping the bassel's correction value in the hash because
// I suspect we might change it to a bool from a 64-bit value
@@ -1415,7 +1415,7 @@
}
//! Each NormOp Child should define the operator() to build the IR
virtual void operator()(FusionDefinition& fd) = 0;
void operator()(FusionDefinition& fd) override = 0;
virtual void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
@@ -1645,7 +1645,7 @@ struct FullOpRecord : RecordFunctor {
fd.setFusionState(outputs_.at(0).index, output);
}
virtual void print(std::ostream& os, bool close_function = true) const {
virtual void print(std::ostream& os, bool close_function = true) const final {
RecordFunctor::print(os, false);
os << ", shape=[";
bool first_arg = true;

View File

@@ -145,7 +145,7 @@ class ViewTransform : public Transform {
}
// Debugging utility to convert the transformation into a string.
virtual std::string toString() const = 0;
virtual std::string toString() const override = 0;
protected:
ViewTransform(const int64_t& index) : Transform(index) {}

View File

@@ -1,6 +1,11 @@
#pragma once
#include <c10/macros/Macros.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
#include <onnx/shape_inference/implementation.h>
C10_DIAGNOSTIC_POP()
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/serialization/export.h>
#include <mutex>

View File

@@ -3,6 +3,7 @@
#include <ATen/ATen.h>
#include <ATen/Utils.h>
#include <ATen/core/functional.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <c10/util/accumulate.h>
@@ -23,7 +24,9 @@
#include <onnx/checker.h>
#include <onnx/onnx_pb.h>
#include <onnx/proto_utils.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
#include <onnx/shape_inference/implementation.h>
C10_DIAGNOSTIC_POP()
#include <fstream>
#include <memory>

View File

@@ -360,7 +360,7 @@ class BufLiveRange : public IRVisitor {
}
}
void visit(BlockPtr v) {
void visit(BlockPtr v) override {
for (StmtPtr s : *v) {
curr_index_ += 1;
findAccAndUpdateLiveRange(s);

View File

@@ -609,7 +609,7 @@ void LLVMCodeGenImpl::emitWrapper(const std::vector<llvm::Type*>& params) {
class LLVMIntrinsicsExpander : public GenericIntrinsicsExpander {
private:
ExprPtr mutate(IntrinsicsPtr v) {
ExprPtr mutate(IntrinsicsPtr v) override {
if (v->op_type() == kTanh) {
ScalarType stype = v->dtype().scalar_type();
if (stype == ScalarType::Float) {

View File

@@ -1,11 +1,16 @@
#ifdef TORCH_ENABLE_LLVM
#include <c10/macros/Macros.h>
#include <torch/csrc/jit/tensorexpr/external_functions.h>
#include <torch/csrc/jit/tensorexpr/intrinsic_symbols.h>
#include <torch/csrc/jit/tensorexpr/llvm_jit.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
#include <llvm/ExecutionEngine/ExecutionEngine.h>
#include <llvm/ExecutionEngine/JITSymbol.h>
C10_DIAGNOSTIC_POP()
#include <llvm/ExecutionEngine/Orc/CompileUtils.h>
#include <llvm/ExecutionEngine/Orc/ExecutionUtils.h>
#include <llvm/ExecutionEngine/Orc/IRCompileLayer.h>

View File

@@ -1,11 +1,14 @@
#pragma once
#ifdef TORCH_ENABLE_LLVM
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <torch/csrc/Export.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
#include <llvm/ExecutionEngine/JITSymbol.h>
C10_DIAGNOSTIC_POP()
#include <llvm/ExecutionEngine/Orc/Core.h>
#include <llvm/ExecutionEngine/Orc/ThreadSafeModule.h>
#include <llvm/Target/TargetMachine.h>

View File

@@ -114,7 +114,7 @@ class TSBackendImpl : public torch::lazy::BackendImplInterface {
}
torch::lazy::BackendDataPtr GetComputationDataFromNode(
const Node* node) const {
const Node* node) const override {
auto* device_data_node = DeviceData::Cast(node);
if (!device_data_node) {
return nullptr;
@@ -156,11 +156,11 @@ class TSBackendImpl : public torch::lazy::BackendImplInterface {
static_cast<c10::DeviceType>(type));
}
int64_t GetDefaultDeviceOrdinal() const {
int64_t GetDefaultDeviceOrdinal() const override {
return default_device_ordinal_;
}
virtual void SetDefaultDeviceOrdinal(int64_t ordinal) {
virtual void SetDefaultDeviceOrdinal(int64_t ordinal) override {
default_device_ordinal_ = ordinal;
}
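A side note on the hunk above: `SetDefaultDeviceOrdinal` keeps its `virtual` keyword while gaining `override`. That is legal but redundant, since a function marked `override` is implicitly virtual. A sketch with illustrative names showing the two equivalent spellings:

```cpp
struct BackendInterface {
  virtual void SetDefaultDeviceOrdinal(int64_t ordinal);
  virtual ~BackendInterface() = default;
};

struct TSBackend : BackendInterface {
  // Equivalent declarations; most style guides drop the explicit 'virtual'
  // once 'override' is present.
  virtual void SetDefaultDeviceOrdinal(int64_t ordinal) override;
  // void SetDefaultDeviceOrdinal(int64_t ordinal) override;
};
```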

View File

@@ -46,8 +46,7 @@ class LibKinetoClient : public libkineto::ClientInterface {
(void)disableProfiler();
}
// @lint-ignore CLANGTIDY cppcoreguidelines-explicit-virtual-functions
void set_withstack(bool withStack) {
void set_withstack(bool withStack) override {
withStack_ = withStack;
}