From 96e3b3ac724c4e262dc8fc05da18ebcbda6473b8 Mon Sep 17 00:00:00 2001
From: Nikita Shulga
Date: Mon, 27 Mar 2023 18:46:09 +0000
Subject: [PATCH] [BE] Cleanup CMake flag suppressions (#97584)

Use `append_cxx_flag_if_supported` to determine whether or not `-Werror`
is supported. (A sketch of how such a helper can be implemented follows
the diff below.)

Do not suppress deprecation warnings if glog is not used/installed: the
way the check is written right now, it will suppress deprecations even
if `glog` is not installed. Similarly, do not suppress deprecations on
macOS simply because we are compiling with protobuf.

Fix deprecation warnings in:
- MPS, by replacing `MTLResourceOptionCPUCacheModeDefault` ->
  `MTLResourceCPUCacheModeDefaultCache`
- GTests, by replacing `TYPED_TEST_CASE` -> `TYPED_TEST_SUITE`
- `codegen/onednn/interface.cpp`, by passing `Stack` by reference rather
  than by pointer.

Do not guard calls to `append_cxx_flag_if_supported` with `if(CLANG)` or
`if(GCC)`.

Fix some deprecated calls in `Metal`; hide the more complex cases under
`C10_CLANG_DIAGNOSTIC_IGNORE`.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/97584
Approved by: https://github.com/kit1980
---
 CMakeLists.txt                                | 31 ++++------
 aten/src/ATen/native/metal/MetalContext.mm    |  5 +-
 .../native/metal/mpscnn/MPSCNNNeuronOp.mm     |  7 +++
 .../ATen/native/metal/mpscnn/MPSImageUtils.mm |  4 +-
 aten/src/ATen/native/metal/ops/MetalConcat.mm |  6 +-
 .../ATen/native/metal/ops/MetalTranspose.mm   |  2 +-
 aten/src/ATen/native/mps/operations/Copy.mm   |  4 +-
 .../ATen/native/mps/operations/UnaryOps.mm    |  2 +-
 aten/src/ATen/test/ExclusivelyOwned_test.cpp  |  2 +-
 aten/src/ATen/test/MaybeOwned_test.cpp        |  2 +-
 aten/src/ATen/test/vec_test_all_types.cpp     | 58 +++++++++----------
 c10/test/util/bfloat16_test.cpp               |  2 +-
 c10/test/util/optional_test.cpp               |  8 +--
 cmake/MiscCheck.cmake                         | 30 +++++-----
 cmake/ProtoBuf.cmake                          |  5 --
 cmake/public/utils.cmake                      |  1 -
 test/cpp/jit/test_lite_interpreter.cpp        |  2 +-
 torch/CMakeLists.txt                          |  5 ++
 torch/csrc/jit/codegen/onednn/interface.cpp   |  6 +-
 19 files changed, 89 insertions(+), 93 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6de6ba8a483..d828ac13d18 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -808,7 +808,6 @@ if(NOT MSVC)
   append_cxx_flag_if_supported("-Wno-unused-result" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-strict-overflow" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-strict-aliasing" CMAKE_CXX_FLAGS)
-  append_cxx_flag_if_supported("-Wno-error=deprecated-declarations" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wvla-extension" CMAKE_CXX_FLAGS)
   if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
     string(APPEND CMAKE_CXX_FLAGS " -Wno-range-loop-analysis")
@@ -854,19 +853,13 @@ if(NOT MSVC)
   append_cxx_flag_if_supported("-Wno-error=pedantic" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-error=old-style-cast" CMAKE_CXX_FLAGS)

-  # These flags are not available in GCC-4.8.5. Set only when using clang.
-  # Compared against https://gcc.gnu.org/onlinedocs/gcc-4.8.5/gcc/Option-Summary.html
-  if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
-    append_cxx_flag_if_supported("-Wconstant-conversion" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-invalid-partial-specialization" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-unused-private-field" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-aligned-allocation-unavailable" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-missing-braces" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wunused-lambda-capture" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Qunused-arguments" CMAKE_CXX_FLAGS)
-    if(${USE_COLORIZE_OUTPUT})
-    endif()
-  endif()
+  append_cxx_flag_if_supported("-Wconstant-conversion" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wno-invalid-partial-specialization" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wno-unused-private-field" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wno-aligned-allocation-unavailable" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wno-missing-braces" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wunused-lambda-capture" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Qunused-arguments" CMAKE_CXX_FLAGS)

   if(${USE_COLORIZE_OUTPUT})
     append_cxx_flag_if_supported("-fcolor-diagnostics" CMAKE_CXX_FLAGS)
@@ -879,17 +872,13 @@ if(NOT MSVC)
     string(APPEND CMAKE_CXX_FLAGS " -faligned-new")
   endif()
   if(WERROR)
-    check_cxx_compiler_flag("-Werror" COMPILER_SUPPORT_WERROR)
+    append_cxx_flag_if_supported("-Werror" CMAKE_CXX_FLAGS)
     if(NOT COMPILER_SUPPORT_WERROR)
       set(WERROR FALSE)
-    else()
-      string(APPEND CMAKE_CXX_FLAGS " -Werror")
     endif()
-  endif(WERROR)
-  if(NOT APPLE)
-    append_cxx_flag_if_supported("-Wno-unused-but-set-variable" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-maybe-uninitialized" CMAKE_CXX_FLAGS)
   endif()
+  append_cxx_flag_if_supported("-Wno-unused-but-set-variable" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wno-maybe-uninitialized" CMAKE_CXX_FLAGS)
   string(APPEND CMAKE_CXX_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
   string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
diff --git a/aten/src/ATen/native/metal/MetalContext.mm b/aten/src/ATen/native/metal/MetalContext.mm
index c9571757f24..2e63a092105 100644
--- a/aten/src/ATen/native/metal/MetalContext.mm
+++ b/aten/src/ATen/native/metal/MetalContext.mm
@@ -53,9 +53,12 @@ using namespace at::native::metal;
           isOperatingSystemAtLeastVersion:supportedVer]) {
     return false;
   }
+C10_CLANG_DIAGNOSTIC_PUSH()
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-declarations")
   if (![_device supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v3]) {
     return false;
   }
+C10_CLANG_DIAGNOSTIC_POP()
 #else
   return false;
 #endif
@@ -136,7 +139,7 @@ using namespace at::native::metal;
 - (id<MTLBuffer>)emptyMTLBuffer:(int64_t) size {
   TORCH_CHECK(_device);
   id<MTLBuffer> buffer = [_device newBufferWithLength:size
-                              options:MTLResourceOptionCPUCacheModeWriteCombined];
+                              options:MTLResourceCPUCacheModeWriteCombined];
   return buffer;
 }
diff --git a/aten/src/ATen/native/metal/mpscnn/MPSCNNNeuronOp.mm b/aten/src/ATen/native/metal/mpscnn/MPSCNNNeuronOp.mm
index 1b322f9a97e..e722f2765c0 100644
--- a/aten/src/ATen/native/metal/mpscnn/MPSCNNNeuronOp.mm
+++ b/aten/src/ATen/native/metal/mpscnn/MPSCNNNeuronOp.mm
@@ -1,6 +1,11 @@
 #import
 #import

+#include
+
+C10_CLANG_DIAGNOSTIC_PUSH()
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-declarations")
+
 @implementation MPSCNNNeuronOp

 + (MPSCNNNeuronHardSigmoid*)hardSigmoid API_AVAILABLE(ios(11.0), macos(10.13)) {
@@ -70,6 +75,8 @@

 @end

+C10_CLANG_DIAGNOSTIC_POP()
+
 API_AVAILABLE(ios(11.3), macos(10.13), macCatalyst(13.0))
 @implementation MPSCNNNeuronOpDescriptor
diff --git a/aten/src/ATen/native/metal/mpscnn/MPSImageUtils.mm b/aten/src/ATen/native/metal/mpscnn/MPSImageUtils.mm
index b72da18e4e9..b404581782d 100644
--- a/aten/src/ATen/native/metal/mpscnn/MPSImageUtils.mm
+++ b/aten/src/ATen/native/metal/mpscnn/MPSImageUtils.mm
@@ -36,7 +36,7 @@ MPSImage* createStaticImage(const float* src, IntArrayRef sizes) {
   int64_t size_bytes = c10::multiply_integers(sizes) * sizeof(float);
   id<MTLBuffer> buff = [[MetalContext sharedInstance].device
       newBufferWithLength:size_bytes
-                  options:MTLResourceOptionCPUCacheModeWriteCombined];
+                  options:MTLResourceCPUCacheModeWriteCombined];
   memcpy(buff.contents, src, size_bytes);
   MPSImage* output = createStaticImage(sizes);
   id<MTLComputePipelineState> state = [[MetalContext sharedInstance]
@@ -171,7 +171,7 @@ void copyImageToFloatBuffer(float* dst, MPSImage* image) {
   int64_t size_bytes = c10::multiply_integers([image sizes]) * sizeof(float);
   id<MTLBuffer> buffer = [[MetalContext sharedInstance].device
       newBufferWithLength:size_bytes
-                  options:MTLResourceOptionCPUCacheModeDefault];
+                  options:MTLResourceCPUCacheModeDefaultCache];

   id<MTLCommandBuffer> cb = [MetalContext sharedInstance].commandQueue.commandBuffer;
diff --git a/aten/src/ATen/native/metal/ops/MetalConcat.mm b/aten/src/ATen/native/metal/ops/MetalConcat.mm
index 8c28568d310..be9d87d8fe5 100644
--- a/aten/src/ATen/native/metal/ops/MetalConcat.mm
+++ b/aten/src/ATen/native/metal/ops/MetalConcat.mm
@@ -33,7 +33,7 @@ Tensor cat_batch(const Tensor& tensor, const ITensorListRef& tensors, MetalTenso
                                  X, "copy_offset", "copy_offset_nonarray")];
   id<MTLBuffer> offsetBuffer = [[MetalContext sharedInstance].device
       newBufferWithLength:1 * sizeof(ushort)
-                  options:MTLResourceOptionCPUCacheModeWriteCombined];
+                  options:MTLResourceCPUCacheModeWriteCombined];
   ushort* offsetBufferPtr = (ushort*)[offsetBuffer contents];
   offsetBufferPtr[0] = cat_dim4_pointer;
@@ -91,7 +91,7 @@ Tensor cat_feature(const Tensor& tensor, const ITensorListRef& tensors, MetalTen
                    ]];
   id<MTLBuffer> offsetBuffer = [[MetalContext sharedInstance].device
       newBufferWithLength:6 * sizeof(ushort)
-                  options:MTLResourceOptionCPUCacheModeWriteCombined];
+                  options:MTLResourceCPUCacheModeWriteCombined];
   ushort* offsetBufferPtr = (ushort*)[offsetBuffer contents];
   offsetBufferPtr[0] = (X.featureChannels + tex_offset + 3) / 4;
   offsetBufferPtr[1] = (Y.featureChannels + 3) / 4;
@@ -141,7 +141,7 @@ Tensor cat_feature(const Tensor& tensor, const ITensorListRef& tensors, MetalTen
                    ]];
   id<MTLBuffer> offsetBuffer = [[MetalContext sharedInstance].device
       newBufferWithLength:2 * sizeof(ushort)
-                  options:MTLResourceOptionCPUCacheModeWriteCombined];
+                  options:MTLResourceCPUCacheModeWriteCombined];
   ushort* offsetBufferPtr = (ushort*)[offsetBuffer contents];
   offsetBufferPtr[0] = channel_offset / 4;
   offsetBufferPtr[1] = (Y.featureChannels + 3) / 4;
diff --git a/aten/src/ATen/native/metal/ops/MetalTranspose.mm b/aten/src/ATen/native/metal/ops/MetalTranspose.mm
index 0386c84f21c..e1b57a2a401 100644
--- a/aten/src/ATen/native/metal/ops/MetalTranspose.mm
+++ b/aten/src/ATen/native/metal/ops/MetalTranspose.mm
@@ -19,7 +19,7 @@ template <typename T>
 id<MTLBuffer> _makeMTLBuffer(const std::vector<T>& src) {
   id<MTLBuffer> buffer = [[MetalContext sharedInstance].device
       newBufferWithLength:src.size() * sizeof(T)
-                  options:MTLResourceOptionCPUCacheModeWriteCombined];
+                  options:MTLResourceCPUCacheModeWriteCombined];
   memcpy(buffer.contents, src.data(), src.size() * sizeof(T));
   return buffer;
 }
diff --git a/aten/src/ATen/native/mps/operations/Copy.mm b/aten/src/ATen/native/mps/operations/Copy.mm
index 241173bc366..a969aca23df 100644
--- a/aten/src/ATen/native/mps/operations/Copy.mm
+++ b/aten/src/ATen/native/mps/operations/Copy.mm
@@ -126,7 +126,7 @@ static at::Tensor& copy_from_mps_(at::Tensor& dst_, const at::Tensor& src_, bool
   size_t dst_tensor_nbytes = dst.nbytes();

   @autoreleasepool {
-    MTLResourceOptions options = MTLResourceOptionCPUCacheModeDefault | MTLResourceStorageModeShared;
+    MTLResourceOptions options = MTLResourceCPUCacheModeDefaultCache | MTLResourceStorageModeShared;
     NSUInteger alignedLength = 0;

     void* host_dst = dst.storage().data();
@@ -189,7 +189,7 @@ static void copy_to_mps_stride_contig(at::Tensor& dst, const at::Tensor& src, bo
   TORCH_INTERNAL_ASSERT(src.dtype() == dst.dtype() && src.strides() == dst.strides() && is_strided_contiguous(src));

   @autoreleasepool {
-    MTLResourceOptions options = MTLResourceOptionCPUCacheModeDefault | MTLResourceStorageModeShared;
+    MTLResourceOptions options = MTLResourceCPUCacheModeDefaultCache | MTLResourceStorageModeShared;
     NSUInteger alignedLength = 0;
     NSUInteger sourceOffset = 0;

diff --git a/aten/src/ATen/native/mps/operations/UnaryOps.mm b/aten/src/ATen/native/mps/operations/UnaryOps.mm
index 147b79d65df..4a062da9022 100644
--- a/aten/src/ATen/native/mps/operations/UnaryOps.mm
+++ b/aten/src/ATen/native/mps/operations/UnaryOps.mm
@@ -372,7 +372,7 @@ TORCH_IMPL_FUNC(cumsum_out_mps)

   // issue #103810551: cumsum is horribly broken for int8, int16 and as chances for overflow is pretty high, cast to
   // int32 fixed in macOS 13.3
-  bool castInputData = (isIntegralType(input.scalar_type()) && input.scalar_type() != ScalarType::Int &&
+  bool castInputData = (isIntegralType(input.scalar_type(), false) && input.scalar_type() != ScalarType::Int &&
                         input.scalar_type() != ScalarType::Long);

   TORCH_CHECK(macOS13_3_plus || input.scalar_type() != ScalarType::Long,
diff --git a/aten/src/ATen/test/ExclusivelyOwned_test.cpp b/aten/src/ATen/test/ExclusivelyOwned_test.cpp
index 7b88a2c400d..661cb0fd860 100644
--- a/aten/src/ATen/test/ExclusivelyOwned_test.cpp
+++ b/aten/src/ATen/test/ExclusivelyOwned_test.cpp
@@ -66,7 +66,7 @@ using ExclusivelyOwnedTypes = ::testing::Types<
   caffe2::Tensor
 >;

-TYPED_TEST_CASE(ExclusivelyOwnedTest, ExclusivelyOwnedTypes);
+TYPED_TEST_SUITE(ExclusivelyOwnedTest, ExclusivelyOwnedTypes);

 TYPED_TEST(ExclusivelyOwnedTest, DefaultConstructor) {
   c10::ExclusivelyOwned<TypeParam> defaultConstructed;
diff --git a/aten/src/ATen/test/MaybeOwned_test.cpp b/aten/src/ATen/test/MaybeOwned_test.cpp
index 3063c5b069f..d3579059e34 100644
--- a/aten/src/ATen/test/MaybeOwned_test.cpp
+++ b/aten/src/ATen/test/MaybeOwned_test.cpp
@@ -197,7 +197,7 @@ using MaybeOwnedTypes = ::testing::Types<
   c10::IValue
 >;

-TYPED_TEST_CASE(MaybeOwnedTest, MaybeOwnedTypes);
+TYPED_TEST_SUITE(MaybeOwnedTest, MaybeOwnedTypes);

 TYPED_TEST(MaybeOwnedTest, SimpleDereferencingString) {
   assertBorrow(this->borrowed, this->borrowFrom);
diff --git a/aten/src/ATen/test/vec_test_all_types.cpp b/aten/src/ATen/test/vec_test_all_types.cpp
index 6cd9f0d0972..6b5f2819823 100644
--- a/aten/src/ATen/test/vec_test_all_types.cpp
+++ b/aten/src/ATen/test/vec_test_all_types.cpp
@@ -70,35 +70,35 @@ namespace {
    using FloatIntTestedTypes = ::testing::Types;
    using ComplexTypes = ::testing::Types;
    using BFloatTestedTypes = ::testing::Types;
-    TYPED_TEST_CASE(Memory, ALLTestedTypes);
-    TYPED_TEST_CASE(Arithmetics, FloatIntTestedTypes);
-    TYPED_TEST_CASE(Comparison, RealFloatIntTestedTypes);
-    TYPED_TEST_CASE(Bitwise, FloatIntTestedTypes);
-    TYPED_TEST_CASE(MinMax, RealFloatIntTestedTypes);
-    TYPED_TEST_CASE(Nan, RealFloatTestedTypes);
-    TYPED_TEST_CASE(Interleave, RealFloatIntTestedTypes);
-    TYPED_TEST_CASE(SignManipulation, FloatIntTestedTypes);
-    TYPED_TEST_CASE(Rounding, RealFloatTestedTypes);
-    TYPED_TEST_CASE(SqrtAndReciprocal, FloatTestedTypes);
-    TYPED_TEST_CASE(SqrtAndReciprocalReal, RealFloatTestedTypes);
-    TYPED_TEST_CASE(FractionAndRemainderReal, RealFloatTestedTypes);
-    TYPED_TEST_CASE(Trigonometric, RealFloatTestedTypes);
-    TYPED_TEST_CASE(ErrorFunctions, RealFloatTestedTypes);
-    TYPED_TEST_CASE(Exponents, RealFloatTestedTypes);
-    TYPED_TEST_CASE(Hyperbolic, RealFloatTestedTypes);
-    TYPED_TEST_CASE(InverseTrigonometricReal, RealFloatTestedTypes);
-    TYPED_TEST_CASE(InverseTrigonometric, FloatTestedTypes);
-    TYPED_TEST_CASE(LGamma, RealFloatTestedTypes);
-    TYPED_TEST_CASE(Logarithm, FloatTestedTypes);
-    TYPED_TEST_CASE(LogarithmReals, RealFloatTestedTypes);
-    TYPED_TEST_CASE(Pow, RealFloatTestedTypes);
-    TYPED_TEST_CASE(RealTests, RealFloatTestedTypes);
-    TYPED_TEST_CASE(RangeFactories, FloatIntTestedTypes);
-    TYPED_TEST_CASE(BitwiseFloatsAdditional, RealFloatTestedTypes);
-    TYPED_TEST_CASE(BitwiseFloatsAdditional2, FloatTestedTypes);
-    TYPED_TEST_CASE(QuantizationTests, QuantTestedTypes);
-    TYPED_TEST_CASE(FunctionalTests, RealFloatIntTestedTypes);
-    TYPED_TEST_CASE(FunctionalBF16Tests, BFloatTestedTypes);
+    TYPED_TEST_SUITE(Memory, ALLTestedTypes);
+    TYPED_TEST_SUITE(Arithmetics, FloatIntTestedTypes);
+    TYPED_TEST_SUITE(Comparison, RealFloatIntTestedTypes);
+    TYPED_TEST_SUITE(Bitwise, FloatIntTestedTypes);
+    TYPED_TEST_SUITE(MinMax, RealFloatIntTestedTypes);
+    TYPED_TEST_SUITE(Nan, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(Interleave, RealFloatIntTestedTypes);
+    TYPED_TEST_SUITE(SignManipulation, FloatIntTestedTypes);
+    TYPED_TEST_SUITE(Rounding, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(SqrtAndReciprocal, FloatTestedTypes);
+    TYPED_TEST_SUITE(SqrtAndReciprocalReal, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(FractionAndRemainderReal, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(Trigonometric, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(ErrorFunctions, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(Exponents, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(Hyperbolic, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(InverseTrigonometricReal, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(InverseTrigonometric, FloatTestedTypes);
+    TYPED_TEST_SUITE(LGamma, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(Logarithm, FloatTestedTypes);
+    TYPED_TEST_SUITE(LogarithmReals, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(Pow, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(RealTests, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(RangeFactories, FloatIntTestedTypes);
+    TYPED_TEST_SUITE(BitwiseFloatsAdditional, RealFloatTestedTypes);
+    TYPED_TEST_SUITE(BitwiseFloatsAdditional2, FloatTestedTypes);
+    TYPED_TEST_SUITE(QuantizationTests, QuantTestedTypes);
+    TYPED_TEST_SUITE(FunctionalTests, RealFloatIntTestedTypes);
+    TYPED_TEST_SUITE(FunctionalBF16Tests, BFloatTestedTypes);
    TYPED_TEST(Memory, UnAlignedLoadStore) {
        using vec = TypeParam;
        using VT = ValueType<vec>;
diff --git a/c10/test/util/bfloat16_test.cpp b/c10/test/util/bfloat16_test.cpp
index 0de6882b46a..d918aaa026a 100644
--- a/c10/test/util/bfloat16_test.cpp
+++ b/c10/test/util/bfloat16_test.cpp
@@ -180,7 +180,7 @@ TEST_P(BFloat16Test, BFloat16RNETest) {
   EXPECT_EQ(GetParam().rne, rounded);
 }

-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     BFloat16Test_Instantiation,
     BFloat16Test,
     ::testing::Values(
diff --git a/c10/test/util/optional_test.cpp b/c10/test/util/optional_test.cpp
index ac976b4b16f..fa30929f8ec 100644
--- a/c10/test/util/optional_test.cpp
+++ b/c10/test/util/optional_test.cpp
@@ -63,7 +63,7 @@ static_assert(
     sizeof(c10::optional<c10::IntArrayRef>) == sizeof(c10::IntArrayRef),
     "c10::optional<c10::IntArrayRef> should be size-optimized");

-TYPED_TEST_CASE(OptionalTest, OptionalTypes);
+TYPED_TEST_SUITE(OptionalTest, OptionalTypes);

 TYPED_TEST(OptionalTest, Empty) {
   typename TestFixture::optional empty;
@@ -111,11 +111,11 @@ TEST_P(SelfCompareTest, SelfCompare) {
   EXPECT_THAT(x, Not(Gt(x)));
 }

-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     nullopt,
     SelfCompareTest,
     testing::Values(c10::nullopt));

-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     int,
     SelfCompareTest,
     testing::Values(c10::make_optional(2)));
@@ -158,7 +158,7 @@ using CmpTestTypes = testing::Types<
     std::pair>>;
 template <typename T>
 class CmpTest : public testing::Test {};

-TYPED_TEST_CASE(CmpTest, CmpTestTypes);
+TYPED_TEST_SUITE(CmpTest, CmpTestTypes);

 TYPED_TEST(CmpTest, Cmp) {
   TypeParam pair = {2, 3};
diff --git a/cmake/MiscCheck.cmake b/cmake/MiscCheck.cmake
index d5b5cd3ddbc..71d73866b2a 100644
--- a/cmake/MiscCheck.cmake
+++ b/cmake/MiscCheck.cmake
@@ -13,24 +13,22 @@ include(CMakePushCheckState)
 set(CAFFE2_USE_EXCEPTION_PTR 1)

 # ---[ Check if we want to turn off deprecated warning due to glog.
-# Note(jiayq): on ubuntu 14.04, the default glog install uses ext/hash_set that
-# is being deprecated. As a result, we will test if this is the environment we
-# are building under. If yes, we will turn off deprecation warning for a
-# cleaner build output.
-cmake_push_check_state(RESET)
-set(CMAKE_REQUIRED_FLAGS "-std=c++14")
-CHECK_CXX_SOURCE_COMPILES(
-    "#include <glog/stl_logging.h>
-    int main(int argc, char** argv) {
-      return 0;
-    }" CAFFE2_NEED_TO_TURN_OFF_DEPRECATION_WARNING
-    FAIL_REGEX ".*-Wno-deprecated.*")
+if(USE_GLOG)
+  cmake_push_check_state(RESET)
+  set(CMAKE_REQUIRED_FLAGS "-std=c++14")
+  CHECK_CXX_SOURCE_COMPILES(
+    "#include <glog/stl_logging.h>
+    int main(int argc, char** argv) {
+      return 0;
+    }" CAFFE2_NEED_TO_TURN_OFF_DEPRECATION_WARNING
+    FAIL_REGEX ".*-Wno-deprecated.*")

-if(NOT CAFFE2_NEED_TO_TURN_OFF_DEPRECATION_WARNING AND NOT MSVC)
-  message(STATUS "Turning off deprecation warning due to glog.")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated")
+  if(NOT CAFFE2_NEED_TO_TURN_OFF_DEPRECATION_WARNING AND NOT MSVC)
+    message(STATUS "Turning off deprecation warning due to glog.")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated")
+  endif()
+  cmake_pop_check_state()
 endif()
-cmake_pop_check_state()

 # ---[ Check if the compiler has AVX/AVX2 support. We only check AVX2.
 if(NOT INTERN_BUILD_MOBILE)
diff --git a/cmake/ProtoBuf.cmake b/cmake/ProtoBuf.cmake
index 8d7633c4ab0..66acb1269ef 100644
--- a/cmake/ProtoBuf.cmake
+++ b/cmake/ProtoBuf.cmake
@@ -5,11 +5,6 @@ macro(custom_protobuf_find)
   option(protobuf_BUILD_TESTS "" OFF)
   option(protobuf_BUILD_EXAMPLES "" OFF)
   option(protobuf_WITH_ZLIB "" OFF)
-  if(APPLE)
-    # Protobuf generated files triggers a deprecated atomic operation warning
-    # so we turn it off here.
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations")
-  endif()
   if(${CAFFE2_LINK_LOCAL_PROTOBUF})
     # If we are going to link protobuf locally, we will need to turn off
     # shared libs build for protobuf.
diff --git a/cmake/public/utils.cmake b/cmake/public/utils.cmake
index 6264efcc75d..bab415de44d 100644
--- a/cmake/public/utils.cmake
+++ b/cmake/public/utils.cmake
@@ -445,7 +445,6 @@ function(torch_compile_options libname)
       -Wno-unknown-pragmas
       -Wno-strict-overflow
       -Wno-strict-aliasing
-      -Wno-error=deprecated-declarations
       # Clang has an unfixed bug leading to spurious missing braces
       # warnings, see https://bugs.llvm.org/show_bug.cgi?id=21629
       -Wno-missing-braces
diff --git a/test/cpp/jit/test_lite_interpreter.cpp b/test/cpp/jit/test_lite_interpreter.cpp
index 212d64251de..b96691b17c8 100644
--- a/test/cpp/jit/test_lite_interpreter.cpp
+++ b/test/cpp/jit/test_lite_interpreter.cpp
@@ -2217,7 +2217,7 @@ TEST_P(LiteInterpreterDynamicTypeTestFixture, Conformance) {
   }
 }

-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     PyTorch,
     LiteInterpreterDynamicTypeTestFixture,
     ::testing::Range(
diff --git a/torch/CMakeLists.txt b/torch/CMakeLists.txt
index c6c23f25a41..9013d1d2314 100644
--- a/torch/CMakeLists.txt
+++ b/torch/CMakeLists.txt
@@ -279,6 +279,11 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
   set_source_files_properties(${TORCH_SRC_DIR}/csrc/utils/throughput_benchmark.cpp PROPERTIES COMPILE_FLAGS -Wno-attributes)
 endif()

+if(NOT MSVC)
+  # cudaProfilerInitialize must go away
+  set_source_files_properties(${TORCH_SRC_DIR}/csrc/cuda/shared/cudart.cpp PROPERTIES COMPILE_FLAGS "-Wno-deprecated-declarations")
+endif()
+
 # coreml
 if(USE_COREML_DELEGATE)
   list(APPEND TORCH_PYTHON_SRCS ${TORCH_SRC_DIR}/csrc/jit/backends/coreml/cpp/backend.cpp)
diff --git a/torch/csrc/jit/codegen/onednn/interface.cpp b/torch/csrc/jit/codegen/onednn/interface.cpp
index e65b4048383..a7336d28d0d 100644
--- a/torch/csrc/jit/codegen/onednn/interface.cpp
+++ b/torch/csrc/jit/codegen/onednn/interface.cpp
@@ -99,9 +99,9 @@ void fuseGraph(std::shared_ptr<Graph>& g) {

 Operation createLlgaKernel(const Node* node) {
   auto kernel = std::make_shared<LlgaKernel>(node);
-  return [kernel](Stack* stack) {
+  return [kernel](Stack& stack) {
     RECORD_FUNCTION(kernel->debugName(), std::vector<c10::IValue>());
-    kernel->run(*stack);
+    kernel->run(stack);
     return 0;
   };
 }
@@ -118,7 +118,7 @@ RegisterOperators oneDNNFusionGroupOp({
 // But if we have any scalar inputs to guard in the future, some logic here
 // would have to be changed.
 Operation createLlgaGuardKernel(const Node* node) {
-  return [node](Stack* stack) {
+  return [node](Stack& stack) {
 #ifdef GRAPH_DEBUG_ENABLED
     GRAPH_DEBUG("Guarding node: ", node->kind().toQualString());
 #endif
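
Note on `append_cxx_flag_if_supported`, which the CMake hunks above lean
on: it is defined in `cmake/public/utils.cmake`. Below is a minimal
sketch of how such a helper can be written, assuming only CMake's
standard CheckCXXCompilerFlag module; the exact upstream definition may
differ in naming and details.

    include(CheckCXXCompilerFlag)

    function(append_cxx_flag_if_supported flag outputvar)
      # Derive a cache-variable name from the flag, e.g. "-Werror"
      # becomes HAS_WERROR; check_cxx_compiler_flag() caches its result
      # under that name, so each flag is probed at most once per build.
      string(TOUPPER "HAS${flag}" _flag_name)
      string(REGEX REPLACE "[=-]" "_" _flag_name "${_flag_name}")
      # Try compiling a trivial source file with the flag; the cache
      # variable is set to a true value only if the compiler accepts it.
      check_cxx_compiler_flag("${flag}" ${_flag_name})
      if(${_flag_name})
        string(APPEND ${outputvar} " ${flag}")
        set(${outputvar} "${${outputvar}}" PARENT_SCOPE)
      endif()
    endfunction()

    # Usage, as in the hunks above:
    #   append_cxx_flag_if_supported("-Werror" CMAKE_CXX_FLAGS)

Because unsupported flags are silently skipped, callers need no
`if(CLANG)`/`if(GCC)` guards, which is what the CMakeLists.txt hunks
above rely on.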