Revert "Enable -Wunused on torch targets (#150077)"

This reverts commit 688adc9941.

Reverted https://github.com/pytorch/pytorch/pull/150077 on behalf of https://github.com/wdvr due to failing internally with use of undeclared identifier ([comment](https://github.com/pytorch/pytorch/pull/150077#issuecomment-2846499828))
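For context, the reverted PR silenced -Wunused-parameter by commenting out parameter names in signatures (e.g. `const void* /*data*/`), which is what the diff below undoes. That pattern breaks any build configuration whose function bodies still reference the name, which matches the "use of undeclared identifier" failure cited above. A minimal sketch of the failure mode, with hypothetical function names not taken from the PR:

// Fine: the parameter is intentionally unused, its name is commented out,
// and -Wunused-parameter has nothing to warn about.
bool is_pinned_stub(const void* /*data*/) {
  return false;
}

#if 0  // Does not compile: the body still references the commented-out name.
bool is_pinned_checked(const void* /*data*/) {
  return data != nullptr;  // clang: use of undeclared identifier 'data'
}
#endif

// Alternative that keeps the name visible while still silencing the warning.
bool is_pinned_attr([[maybe_unused]] const void* data) {
  return false;
}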
PyTorch MergeBot 2025-05-02 06:53:20 +00:00
parent 3731b70b40
commit 6dadfc4457
21 changed files with 74 additions and 71 deletions

==== changed file ====

@@ -86,7 +86,7 @@ struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface {
     TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. ", CUDA_HELP);
   }
-  bool isPinnedPtr(const void* /*data*/) const override {
+  bool isPinnedPtr(const void* data) const override {
     return false;
   }

==== changed file ====

@@ -6,6 +6,8 @@
 #include <ATen/detail/AcceleratorHooksInterface.h>
+#include <memory>
 // NB: Class must live in `at` due to limitations of Registry.h.
 namespace at {
@@ -35,7 +37,7 @@ struct TORCH_API HIPHooksInterface : AcceleratorHooksInterface {
     return -1;
   }
-  bool isPinnedPtr(const void* /*data*/ ) const override {
+  bool isPinnedPtr(const void* data) const override {
     return false;
   }
@@ -47,7 +49,7 @@ struct TORCH_API HIPHooksInterface : AcceleratorHooksInterface {
     return 0;
   }
-  bool hasPrimaryContext(DeviceIndex /*device_index*/ ) const override {
+  bool hasPrimaryContext(DeviceIndex device_index) const override {
     TORCH_CHECK(false, "Cannot check primary context without ATen_hip library.");
   }
 };

==== changed file ====

@@ -15,7 +15,7 @@ struct TORCH_API IPUHooksInterface : AcceleratorHooksInterface {
     TORCH_CHECK(false, "Cannot initialize IPU without ATen_ipu library.");
   }
-  bool hasPrimaryContext(DeviceIndex /*device_index*/) const override {
+  bool hasPrimaryContext(DeviceIndex device_index) const override {
     TORCH_CHECK(false, "Cannot initialize IPU without ATen_ipu library.");
     return false;
   }
@@ -26,7 +26,7 @@ struct TORCH_API IPUHooksInterface : AcceleratorHooksInterface {
   }
   Generator getNewGenerator(
-      DeviceIndex /*device_index*/ = -1) const override {
+      DeviceIndex device_index [[maybe_unused]] = -1) const override {
     TORCH_CHECK(false, "Cannot initialize IPU without ATen_ipu library.");
   }
 };

==== changed file ====

@@ -17,7 +17,7 @@ struct TORCH_API MAIAHooksInterface : AcceleratorHooksInterface {
     TORCH_CHECK(false, "Cannot initialize MAIA without ATen_maia library.");
   }
-  bool hasPrimaryContext(DeviceIndex /*device_index*/) const override {
+  bool hasPrimaryContext(DeviceIndex device_index) const override {
     TORCH_CHECK(false, "Cannot initialize MAIA without ATen_maia library.");
     return false;
   }

==== changed file ====

@@ -12,6 +12,7 @@
 #include <ATen/detail/AcceleratorHooksInterface.h>
 #include <string>
+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")
 namespace at {
 class Context;
 }
@@ -45,7 +46,7 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
     return 0;
   }
-  virtual void deviceSynchronize(c10::DeviceIndex /*device_index*/) const {
+  virtual void deviceSynchronize(c10::DeviceIndex device_index) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
@@ -53,11 +54,11 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
-  bool hasPrimaryContext(DeviceIndex /*device_index*/) const override {
+  bool hasPrimaryContext(DeviceIndex device_index) const override {
     return false;
   }
-  void setCurrentDevice(DeviceIndex /*device*/) const override {
+  void setCurrentDevice(DeviceIndex device) const override {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
@@ -66,36 +67,36 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
     return -1;
   }
-  DeviceIndex exchangeDevice(DeviceIndex /*device*/) const override {
+  DeviceIndex exchangeDevice(DeviceIndex device) const override {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return -1;
   }
-  DeviceIndex maybeExchangeDevice(DeviceIndex /*device*/) const override {
+  DeviceIndex maybeExchangeDevice(DeviceIndex device) const override {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return -1;
   }
-  virtual c10::Stream getCurrentStream(DeviceIndex /*device*/) const {
+  virtual c10::Stream getCurrentStream(DeviceIndex device) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA);
   }
-  virtual int64_t getCurrentRawStream(DeviceIndex /*device*/) const {
+  virtual int64_t getCurrentRawStream(DeviceIndex device) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return -1;
   }
-  virtual c10::Stream getDefaultStream(DeviceIndex /*device*/) const {
+  virtual c10::Stream getDefaultStream(DeviceIndex device) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA);
   }
-  virtual void setCurrentStream(const c10::Stream& /*stream*/ ) const {
+  virtual void setCurrentStream(const c10::Stream& stream) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
-  bool isPinnedPtr(const void* /*data*/) const override {
+  bool isPinnedPtr(const void* data) const override {
     return false;
   }
@@ -104,12 +105,12 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
     return nullptr;
   }
-  virtual PyObject* memoryStats(DeviceIndex /*device*/) const {
+  virtual PyObject* memoryStats(DeviceIndex device) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return nullptr;
   }
-  virtual PyObject* getDeviceCapability(DeviceIndex /*device*/) const {
+  virtual PyObject* getDeviceCapability(DeviceIndex device) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return nullptr;
   }
@@ -120,9 +121,9 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
   virtual void recordMemoryHistory(
-      const std::optional<std::string>& /*enabled*/,
-      const std::string& /*stacks*/,
-      size_t /*max_entries*/) const {
+      const std::optional<std::string>& enabled,
+      const std::string& stacks,
+      size_t max_entries) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
@@ -136,7 +137,7 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
     return 0;
   }
-  virtual void resetPeakMemoryStats(DeviceIndex /*device*/) const {
+  virtual void resetPeakMemoryStats(DeviceIndex device) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
@@ -157,3 +158,4 @@ TORCH_API const MTIAHooksInterface& getMTIAHooks();
 TORCH_API bool isMTIAHooksBuilt();
 } // namespace detail
 } // namespace at
+C10_DIAGNOSTIC_POP()
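Note: the C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED / C10_DIAGNOSTIC_POP pair restored above suppresses one named warning for everything between the two macros. A minimal sketch of the same pattern, assuming the macros come from c10/macros/Macros.h and using an illustrative interface rather than the real MTIA hooks:

#include <c10/macros/Macros.h>

C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")

// Fallback hook implementations keep their parameter names for readability even
// though the stub bodies never touch them; the guarded region keeps
// -Wunused-parameter from firing for the whole block.
struct ExampleHooksInterface {
  virtual ~ExampleHooksInterface() = default;
  virtual void deviceSynchronize(int device_index) const {
    // no backend linked in: nothing to synchronize
  }
};

C10_DIAGNOSTIC_POP()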

==== changed file ====

@@ -4201,7 +4201,8 @@ static inline void handle_unflatten_exception(
     const std::runtime_error& e,
     const Tensor& self,
     int64_t dim,
-    SymIntArrayRef sizes) {
+    SymIntArrayRef sizes,
+    std::optional<DimnameList> names) {
   if (!strstr(e.what(), "is invalid for input of size")) {
     TORCH_CHECK(false, "unflatten got an unexpected error:\n", e.what());
   }
@@ -4255,7 +4256,7 @@ static Tensor unflatten_impl(
     // at::infer_size would throw std::runtime_error for invalid size,
     // catch the runtime_error and display the error message in a more
     // user-friendly way for both tensors and named tensors
-    handle_unflatten_exception(e, self, dim, sizes);
+    handle_unflatten_exception(e, self, dim, sizes, names);
   }
   SymDimVector shape(self.sym_sizes().begin(), self.sym_sizes().end());

==== changed file ====

@@ -48,7 +48,7 @@ struct static_unroll {
 template<template<int i> typename func, int end>
 struct static_unroll<func, end, end> {
   template<typename... Args>
-  static inline C10_HOST_DEVICE void with_args(Args... /*args*/) {}
+  static inline C10_HOST_DEVICE void with_args(Args... args) {}
 };
 // helper structs to be used with static_unroll to load arguments
@@ -516,7 +516,7 @@ inline C10_HOST_DEVICE int can_vectorize_up_to(char *pointer) {
 template<int i>
 struct can_vectorize_up_to_helper {
   template <typename array_t, typename traits>
-  static C10_HOST_DEVICE void apply(int &result, array_t pointers, traits /*_*/) {
+  static C10_HOST_DEVICE void apply(int &result, array_t pointers, traits _) {
     using arg_t = typename traits::template arg<i>::type;
     // `pointers` hold the data_ptr for tensors [output, input0, input1, ...], so we
     // need a +1 offset to get the input

==== changed file ====

@@ -331,9 +331,6 @@ if(NOT TARGET clog)
     "${CONFU_DEPENDENCIES_BINARY_DIR}/clog")
   # We build static version of clog but a dynamic library may indirectly depend on it
   set_property(TARGET clog PROPERTY POSITION_INDEPENDENT_CODE ON)
-  if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-    target_compile_options(clog PRIVATE "-Wno-unused-result")
-  endif()
 endif()
 target_link_libraries(pytorch_qnnpack PUBLIC clog)

==== changed file ====

@@ -32,7 +32,7 @@ c10::intrusive_ptr<GeneratorImpl> GeneratorImpl::clone() const {
 }
 void GeneratorImpl::graphsafe_set_state(
-    const c10::intrusive_ptr<c10::GeneratorImpl>& /*state*/) {
+    const c10::intrusive_ptr<c10::GeneratorImpl>& state) {
   TORCH_CHECK_NOT_IMPLEMENTED(
       false, "graphsafe_set_state is not supported in this Generator");
 }

==== changed file ====

@@ -102,7 +102,7 @@ TensorImpl::TensorImpl(
 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 TensorImpl::TensorImpl(
-    ImplType /*type*/,
+    ImplType type,
     Storage&& storage,
     DispatchKeySet key_set,
     const caffe2::TypeMeta data_type)

==== changed file ====

@@ -59,15 +59,15 @@ struct FakeGuardImpl final : public DeviceGuardImplInterface {
   // Event-related functions
   void record(
-      void** /*event*/,
-      const Stream& /*stream*/,
-      const DeviceIndex /*device_index*/,
-      const EventFlag /*flag*/) const override {}
-  void block(void* /*event*/, const Stream& /*stream*/) const override {}
-  bool queryEvent(void* /*event*/) const override {
+      void** event,
+      const Stream& stream,
+      const DeviceIndex device_index,
+      const EventFlag flag) const override {}
+  void block(void* event, const Stream& stream) const override {}
+  bool queryEvent(void* event) const override {
     return true;
   }
-  void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
+  void destroyEvent(void* event, const DeviceIndex device_index)
       const noexcept override {}
   // Convenience methods for testing

==== changed file ====

@@ -1,7 +1,7 @@
 #include <c10/core/SymIntArrayRef.h>
 #include <c10/core/TensorImpl.h>
 #include <c10/core/impl/PyInterpreter.h>
-C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")
 namespace c10::impl {
 struct NoopPyInterpreterVTable final : public PyInterpreterVTable {
@@ -145,4 +145,3 @@ void PyInterpreter::disarm() noexcept {
 }
 } // namespace c10::impl
-C10_DIAGNOSTIC_POP()

==== changed file ====

@@ -70,7 +70,7 @@ inline bool is_thp_alloc(size_t nbytes) {
 }
 #elif !defined(__ANDROID__) && !defined(_MSC_VER)
-constexpr size_t c10_compute_alignment(size_t /*nbytes*/) {
+constexpr size_t c10_compute_alignment([[maybe_unused]] size_t nbytes) {
   return gAlignment;
 }

==== changed file ====

@@ -227,9 +227,7 @@ class CUDAAllocator : public Allocator {
       c10::DeviceIndex device,
       MempoolId_t mempool_id) = 0;
   virtual void releasePool(c10::DeviceIndex device, MempoolId_t mempool_id) = 0;
-  virtual int getPoolUseCount(
-      c10::DeviceIndex /*device*/,
-      MempoolId_t /*mempool_id*/) {
+  virtual int getPoolUseCount(c10::DeviceIndex device, MempoolId_t mempool_id) {
     TORCH_CHECK(
         false,
         name(),
@@ -237,8 +235,8 @@ class CUDAAllocator : public Allocator {
         "If you need it, please file an issue describing your use case.");
   }
   virtual void ensureExistsAndIncrefPool(
-      c10::DeviceIndex /*device*/,
-      MempoolId_t /*mempool_id*/) {
+      c10::DeviceIndex device,
+      MempoolId_t mempool_id) {
     TORCH_CHECK(
         false,
         name(),
@@ -258,9 +256,9 @@ class CUDAAllocator : public Allocator {
   // returns true if the allocated blocks are equal to expected live allocations
   virtual bool checkPoolLiveAllocations(
-      c10::DeviceIndex /*device*/,
-      MempoolId_t /*mempool_id*/,
-      const std::unordered_set<void*>& /*expected_live_allocations*/) {
+      c10::DeviceIndex device,
+      MempoolId_t mempool_id,
+      const std::unordered_set<void*>& expected_live_allocations) {
     TORCH_CHECK(
         false,
         name(),
@@ -283,7 +281,7 @@ class CUDAAllocator : public Allocator {
       RecordContext when,
       bool clearHistory) = 0;
   virtual void recordAnnotation(
-      const std::vector<std::pair<std::string, std::string>>& /*md*/) {}
+      const std::vector<std::pair<std::string, std::string>>& md) {}
   virtual void attachOutOfMemoryObserver(OutOfMemoryObserver observer) = 0;
   // Attached AllocatorTraceTracker callbacks will be called while the

==== changed file ====

@@ -213,11 +213,11 @@ bool CUDAKernelLaunchRegistry::check_env_for_dsa_enabled() const {
 }
 uint32_t CUDAKernelLaunchRegistry::insert(
-    const char* /*launch_filename*/,
-    const char* /*launch_function*/,
-    const uint32_t /*launch_linenum*/,
-    const char* /*kernel_name*/,
-    const int32_t /*stream_id*/) {
+    const char* launch_filename,
+    const char* launch_function,
+    const uint32_t launch_linenum,
+    const char* kernel_name,
+    const int32_t stream_id) {
 #ifdef TORCH_USE_CUDA_DSA
   if (!enabled_at_runtime) {
     return 0;

==== changed file ====

@@ -10,9 +10,9 @@ namespace c10::cuda {
 void c10_cuda_check_implementation(
     const int32_t err,
-    const char* /*filename*/,
-    const char* /*function_name*/,
-    const int /*line_number*/,
+    const char* filename,
+    const char* function_name,
+    const int line_number,
     const bool include_device_assertions) {
   const auto cuda_error = static_cast<cudaError_t>(err);
   const auto cuda_kernel_failure = include_device_assertions
@@ -24,6 +24,7 @@ void c10_cuda_check_implementation(
   }
   [[maybe_unused]] auto error_unused = cudaGetLastError();
+  (void)error_unused;
   std::string check_message;
 #ifndef STRIP_ERROR_MESSAGES

==== changed file ====

@@ -213,7 +213,7 @@ Warning::Warning(
 Warning::Warning(
     warning_variant_t type,
     SourceLocation source_location,
-    detail::CompileTimeEmptyString /*msg*/,
+    detail::CompileTimeEmptyString msg,
     const bool verbatim)
     : Warning(type, source_location, "", verbatim) {}

==== changed file ====

@@ -139,7 +139,7 @@ APIUsageLoggerType* GetAPIUsageLogger() {
 APIUsageMetadataLoggerType* GetAPIUsageMetadataLogger() {
   static APIUsageMetadataLoggerType func =
       [](const std::string&,
-         const std::map<std::string, std::string>& /*metadata_map*/) {};
+         const std::map<std::string, std::string>& metadata_map) {};
   return &func;
 }
@@ -386,7 +386,7 @@ void initLogging() {
   detail::setLogLevelFlagFromEnv();
 }
-bool InitCaffeLogging(int* argc, char** /*argv*/) {
+bool InitCaffeLogging(int* argc, char** argv) {
   // When doing InitCaffeLogging, we will assume that caffe's flag parser has
   // already finished.
   if (*argc == 0)

==== changed file ====

@@ -216,7 +216,9 @@ struct DinicFlowGraph {
     return seen;
   }
-  std::pair<std::vector<size_t>, std::vector<size_t>> partition(size_t t) {
+  std::pair<std::vector<size_t>, std::vector<size_t>> partition(
+      size_t s,
+      size_t t) {
     // Note: the partitioning returns "reachable" / "unreachable",
     // but specifically, for "unreachable", it returns "all vertices
     // that are reachable from t in the reverse residual graph"
@@ -256,7 +258,7 @@
     };
   }
-  auto [reachable_idxs, unreachable_idxs] = partition(t_int);
+  auto [reachable_idxs, unreachable_idxs] = partition(s_int, t_int);
   std::vector<std::string> reachable, unreachable;
   auto idxs_to_names = [&](std::vector<size_t>& src,

==== changed file ====

@@ -113,9 +113,9 @@ bool IsNUMAEnabled() {
   return false;
 }
-void NUMABind(int /*numa_node_id*/) {}
-int GetNUMANode(const void* /*ptr*/) {
+void NUMABind(int numa_node_id) {}
+int GetNUMANode(const void* ptr) {
   return -1;
 }
@@ -123,7 +123,7 @@ int GetNumNUMANodes() {
   return -1;
 }
-void NUMAMove(void* /*ptr*/, size_t /*size*/, int /*numa_node_id*/) {}
+void NUMAMove(void* ptr, size_t size, int numa_node_id) {}
 int GetCurrentNUMANode() {
   return -1;

==== changed file ====

@@ -383,7 +383,6 @@ function(torch_compile_options libname)
       -Wall
       -Wextra
       -Wdeprecated
-      -Wunused
      -Wno-unused-parameter
       -Wno-missing-field-initializers
       -Wno-array-bounds
@@ -391,11 +390,13 @@
       -Wno-strict-overflow
       -Wno-strict-aliasing
       )
+  list(APPEND private_compile_options -Wunused-function)
+  list(APPEND private_compile_options -Wunused-variable)
   if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-    list(APPEND private_compile_options -Wredundant-move)
+    list(APPEND private_compile_options -Wunused-but-set-variable -Wredundant-move)
   endif()
   if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
-    list(APPEND private_compile_options -Wextra-semi -Wno-error=extra-semi -Wmove)
+    list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi -Wmove)
   else()
     list(APPEND private_compile_options
         # Considered to be flaky. See the discussion at
@@ -408,9 +409,9 @@ function(torch_compile_options libname)
       -Werror
       -Werror=inconsistent-missing-override
       -Werror=inconsistent-missing-destructor-override
+      -Werror=unused-function
+      -Werror=unused-variable
       -Werror=pedantic
-      -Werror=unused
-      -Wno-error=unused-parameter
       )
   if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
     list(APPEND private_compile_options -Werror=unused-but-set-variable)
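For reference, the removed -Wunused is an umbrella flag that enables the individual -Wunused-* diagnostics, while the restored lines opt into -Wunused-function and -Wunused-variable one at a time and keep -Wno-unused-parameter. A small, hypothetical C++ illustration (not from the tree) of what the re-enabled warnings catch:

namespace {
// -Wunused-function: an internal-linkage function that is never called.
int unused_helper() {
  return 1;
}
} // namespace

int compute() {
  int unused_value = 42;  // -Wunused-variable: declared and initialized, never read
  int used_value = 7;
  return used_value;
}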