[11/N] Fix Wextra-semi warning (#140926)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/140926
Approved by: https://github.com/ezyang
cyy 2024-11-20 00:32:45 +00:00 committed by PyTorch MergeBot
parent 0443398f5b
commit 0fca51bcc4
5 changed files with 45 additions and 38 deletions
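
For context: Clang's -Wextra-semi flags a redundant semicolon, most often one written after a brace-enclosed member-function body or after a macro whose expansion is already a complete declaration; the stray ';' parses as an empty declaration. A minimal illustrative sketch (hypothetical code, not taken from this commit):

    // Hypothetical example of the pattern this series removes.
    struct Example {
      void before() {};  // the '}' already ends the definition; the trailing ';'
                         // is extra and Clang reports it under -Wextra-semi
      void after() {}    // fixed: no redundant semicolon
    };

The hunks below apply that one-character fix in three shapes: an empty virtual method body ({}; becomes {}), dispatch-registration macro call sites, and a table of DEFINE_CONSTANT entries, plus the CMake changes that turn the warning on for Clang builds.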

View File

@@ -816,10 +816,10 @@ Tensor cudnn_convolution_add_relu(
 REGISTER_CUDA_DISPATCH(
     cudnn_convolution_backward_stub,
-    &cudnn_convolution_backward);
+    &cudnn_convolution_backward)
 REGISTER_CUDA_DISPATCH(
     cudnn_convolution_transpose_backward_stub,
-    &cudnn_convolution_transpose_backward);
+    &cudnn_convolution_transpose_backward)
 } // namespace native
 } // namespace at
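
The REGISTER_CUDA_DISPATCH call sites lose their trailing semicolon because the macro expansion already ends in a complete definition, leaving the extra ';' dangling at namespace scope. A hedged sketch of that situation with a made-up registration macro (not PyTorch's actual REGISTER_CUDA_DISPATCH definition):

    // Hypothetical macro whose expansion already ends with ';'.
    #define REGISTER_BACKEND_FN(stub, fn) \
      static const bool stub##_registered = register_backend(#stub, fn);

    bool register_backend(const char* name, void (*fn)());
    void my_backward_kernel();

    REGISTER_BACKEND_FN(my_backward_stub, &my_backward_kernel)   // clean
    REGISTER_BACKEND_FN(other_stub, &my_backward_kernel);        // redundant ';', warns under Clang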

View File

@@ -265,7 +265,7 @@ class CUDAAllocator : public Allocator {
       size_t alloc_trace_max_entries,
       RecordContext when) = 0;
   virtual void recordAnnotation(
-      const std::vector<std::pair<std::string, std::string>>& md){};
+      const std::vector<std::pair<std::string, std::string>>& md) {}
   virtual void attachOutOfMemoryObserver(OutOfMemoryObserver observer) = 0;
   // Attached AllocatorTraceTracker callbacks will be called while the

View File

@@ -732,6 +732,10 @@ if(USE_FBGEMM)
     target_compile_options_if_supported(asmjit -Wno-deprecated-copy)
     target_compile_options_if_supported(asmjit -Wno-unused-but-set-variable)
   endif()
+  if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+    target_compile_options_if_supported(asmjit -Wno-extra-semi)
+    target_compile_options_if_supported(fbgemm -Wno-extra-semi)
+  endif()
 endif()
 
 if(USE_FBGEMM)
@@ -1360,10 +1364,13 @@ if(NOT INTERN_BUILD_MOBILE)
     string(APPEND CMAKE_CUDA_FLAGS " -Xcompiler=/wd4819,/wd4503,/wd4190,/wd4244,/wd4251,/wd4275,/wd4522")
   else()
     if(WERROR)
-      if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_GREATER_EQUAL 13)
+      if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_GREATER_EQUAL 13)
         string(APPEND CMAKE_CUDA_FLAGS " -Xcompiler -Wno-dangling-reference ")
       endif()
-      if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_GREATER_EQUAL 13))
+      if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+        string(APPEND CMAKE_CUDA_FLAGS " -Xcompiler -Wno-extra-semi ")
+      endif()
+      if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_GREATER_EQUAL 13))
         string(APPEND CMAKE_CUDA_FLAGS " -Xcompiler -Werror -Xcompiler -Wno-error=sign-compare ")
       endif()
     endif()

View File

@@ -386,7 +386,7 @@ function(torch_compile_options libname)
   if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
     list(APPEND private_compile_options -Wunused-but-set-variable)
   endif()
-  if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+  if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
     list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi)
   else()
     list(APPEND private_compile_options
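
Taken together, the CMake changes turn the diagnostic on where PyTorch owns the code and off where it does not: torch_compile_options adds -Wextra-semi for Clang builds (kept non-fatal via -Wno-error=extra-semi), while the third-party asmjit and fbgemm targets and the nvcc-driven CUDA compiles get -Wno-extra-semi, so warnings from code outside PyTorch's control are suppressed.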

View File

@@ -199,38 +199,38 @@ namespace c10d {
 // (minor when adding fields, major when changing existing fields)
 // Also update both JSON and Pickle dumps to make use of the newly defined
 // field(s).
-DEFINE_CONSTANT(version_val, "2.4");
-DEFINE_CONSTANT(entries_key, "entries");
-DEFINE_CONSTANT(nccl_comm_key, "nccl_comm_state");
-DEFINE_CONSTANT(version_key, "version");
-DEFINE_CONSTANT(pg_config_key, "pg_config");
-DEFINE_CONSTANT(pg_status_key, "pg_status");
-DEFINE_CONSTANT(record_id_key, "record_id");
-DEFINE_CONSTANT(pg_id_key, "pg_id");
-DEFINE_CONSTANT(pg_name_key, "process_group");
-DEFINE_CONSTANT(collective_seq_id_key, "collective_seq_id");
-DEFINE_CONSTANT(p2p_seq_id_key, "p2p_seq_id");
-DEFINE_CONSTANT(is_p2p_key, "is_p2p");
-DEFINE_CONSTANT(op_id_key, "op_id");
-DEFINE_CONSTANT(profiling_name_key, "profiling_name");
-DEFINE_CONSTANT(input_sizes_key, "input_sizes");
-DEFINE_CONSTANT(input_dtypes_key, "input_dtypes");
-DEFINE_CONSTANT(output_sizes_key, "output_sizes");
-DEFINE_CONSTANT(output_dtypes_key, "output_dtypes");
-DEFINE_CONSTANT(time_created_key, "time_created_ns");
-DEFINE_CONSTANT(duration_key, "duration_ms");
-DEFINE_CONSTANT(timeout_key, "timeout_ms");
-DEFINE_CONSTANT(frames_key, "frames");
-DEFINE_CONSTANT(state_key, "state");
-DEFINE_CONSTANT(line_key, "line");
-DEFINE_CONSTANT(name_key, "name");
-DEFINE_CONSTANT(filename_key, "filename");
-DEFINE_CONSTANT(retired_key, "retired");
-DEFINE_CONSTANT(time_discovered_started_key, "time_discovered_started_ns");
-DEFINE_CONSTANT(time_discovered_completed_key, "time_discovered_completed_ns");
-DEFINE_CONSTANT(completed_state, "completed");
-DEFINE_CONSTANT(scheduled_state, "scheduled");
-DEFINE_CONSTANT(started_state, "started");
+DEFINE_CONSTANT(version_val, "2.4")
+DEFINE_CONSTANT(entries_key, "entries")
+DEFINE_CONSTANT(nccl_comm_key, "nccl_comm_state")
+DEFINE_CONSTANT(version_key, "version")
+DEFINE_CONSTANT(pg_config_key, "pg_config")
+DEFINE_CONSTANT(pg_status_key, "pg_status")
+DEFINE_CONSTANT(record_id_key, "record_id")
+DEFINE_CONSTANT(pg_id_key, "pg_id")
+DEFINE_CONSTANT(pg_name_key, "process_group")
+DEFINE_CONSTANT(collective_seq_id_key, "collective_seq_id")
+DEFINE_CONSTANT(p2p_seq_id_key, "p2p_seq_id")
+DEFINE_CONSTANT(is_p2p_key, "is_p2p")
+DEFINE_CONSTANT(op_id_key, "op_id")
+DEFINE_CONSTANT(profiling_name_key, "profiling_name")
+DEFINE_CONSTANT(input_sizes_key, "input_sizes")
+DEFINE_CONSTANT(input_dtypes_key, "input_dtypes")
+DEFINE_CONSTANT(output_sizes_key, "output_sizes")
+DEFINE_CONSTANT(output_dtypes_key, "output_dtypes")
+DEFINE_CONSTANT(time_created_key, "time_created_ns")
+DEFINE_CONSTANT(duration_key, "duration_ms")
+DEFINE_CONSTANT(timeout_key, "timeout_ms")
+DEFINE_CONSTANT(frames_key, "frames")
+DEFINE_CONSTANT(state_key, "state")
+DEFINE_CONSTANT(line_key, "line")
+DEFINE_CONSTANT(name_key, "name")
+DEFINE_CONSTANT(filename_key, "filename")
+DEFINE_CONSTANT(retired_key, "retired")
+DEFINE_CONSTANT(time_discovered_started_key, "time_discovered_started_ns")
+DEFINE_CONSTANT(time_discovered_completed_key, "time_discovered_completed_ns")
+DEFINE_CONSTANT(completed_state, "completed")
+DEFINE_CONSTANT(scheduled_state, "scheduled")
+DEFINE_CONSTANT(started_state, "started")
 #undef DEFINE_CONSTANT
 TORCH_API size_t hashTensors(const std::vector<at::Tensor>& tensors);
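
The c10d hunk drops the semicolon from every DEFINE_CONSTANT line, which is only safe because the macro's expansion supplies its own terminator; once the ';' lives inside the #define, a second one at the call site is an empty declaration. Two workable conventions for such a macro, sketched with a hypothetical string-table macro (not necessarily how this header defines DEFINE_CONSTANT):

    #include <string>

    // Convention A: the ';' is part of the expansion, so call sites omit it.
    // (The diff implies the real DEFINE_CONSTANT works this way.)
    #define DEFINE_KEY_A(name, value) static const std::string name = value;
    DEFINE_KEY_A(entries_key, "entries")    // clean
    DEFINE_KEY_A(version_key, "version");   // redundant ';', warns under Clang -Wextra-semi

    // Convention B: the expansion omits the ';' and each call site supplies it,
    // so usage reads like an ordinary declaration.
    #define DEFINE_KEY_B(name, value) static const std::string name = value
    DEFINE_KEY_B(frames_key, "frames");     // clean

    #undef DEFINE_KEY_A
    #undef DEFINE_KEY_B

Either convention works on its own; what -Wextra-semi flags is writing the semicolon in both places at once.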