Some CMake and CUDA cleanup given recent update to C++17 (#90599)

The main changes are:
1. Remove outdated checks for old compiler versions, which cannot support C++17 anyway.
2. Remove outdated CMake version checks, since the build now requires CMake 3.18 (see the sketch below).
3. Remove outdated CUDA version checks, since the minimum supported version is moving to CUDA 11.

Almost all changes are in CMake files, which makes them easy to audit.
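As a concrete illustration of item 2, here is a minimal sketch of the before/after pattern, based on the `USE_XNNPACK` hunk in this diff: once the CMake floor is 3.18, a `cmake_dependent_option()` that only guarded against older CMake versions is dead code and collapses into a plain `option()`.

```cmake
include(CMakeDependentOption)

# Before: USE_XNNPACK defaulted to ON, but was forced OFF whenever the
# running CMake was older than 3.12 -- a guard that only matters if such
# old CMake versions are still supported.
cmake_dependent_option(
    USE_XNNPACK "Use XNNPACK. Requires cmake >= 3.12." ON
    "CMAKE_VERSION VERSION_GREATER_EQUAL 3.12" OFF)

# After: cmake_minimum_required(VERSION 3.18) already rules out CMake < 3.12,
# so the version condition is always true and a plain option suffices.
option(USE_XNNPACK "Use XNNPACK" ON)
```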
Pull Request resolved: https://github.com/pytorch/pytorch/pull/90599
Approved by: https://github.com/soumith
cyy 2022-12-30 11:19:23 +00:00 committed by PyTorch MergeBot
parent d5163f5206
commit 9710ac6531
13 changed files with 25 additions and 119 deletions

View File

@@ -13,8 +13,6 @@ cmake_policy(SET CMP0025 NEW)
# nice when it's possible, and it's possible on our Windows configs.
cmake_policy(SET CMP0092 NEW)
set(FIND_CUDA_MODULE_DEPRECATED ON)
# ---[ Project and semantic versioning.
project(Torch CXX C)
@@ -242,9 +240,7 @@ option(USE_OBSERVERS "Use observers module." OFF)
option(USE_OPENCL "Use OpenCL" OFF)
option(USE_OPENCV "Use OpenCV" OFF)
option(USE_OPENMP "Use OpenMP for parallel code" ON)
cmake_dependent_option(
USE_PRECOMPILED_HEADERS "Use pre-compiled headers to accelerate build. Requires cmake >= 3.16." OFF
"CMAKE_VERSION VERSION_GREATER_EQUAL \"3.16\"" OFF)
option(USE_PRECOMPILED_HEADERS "Use pre-compiled headers to accelerate build." OFF)
option(USE_PROF "Use profiling" OFF)
option(USE_QNNPACK "Use QNNPACK (quantized 8-bit operators)" ON)
@@ -272,13 +268,7 @@ option(USE_VULKAN_FP16_INFERENCE "Vulkan - Use fp16 inference" OFF)
option(USE_VULKAN_RELAXED_PRECISION "Vulkan - Use relaxed precision math in the kernels (mediump)" OFF)
option(USE_VULKAN_SHADERC_RUNTIME "Vulkan - Use runtime shader compilation as opposed to build-time (needs libshaderc)" OFF)
# option USE_XNNPACK: try to enable xnnpack by default.
set(XNNPACK_MIN_CMAKE_VER 3.12)
cmake_dependent_option(
USE_XNNPACK "Use XNNPACK. Requires cmake >= ${XNNPACK_MIN_CMAKE_VER}." ON
"CMAKE_VERSION VERSION_GREATER_EQUAL ${XNNPACK_MIN_CMAKE_VER}" OFF)
if(NOT USE_XNNPACK AND CMAKE_VERSION VERSION_LESS ${XNNPACK_MIN_CMAKE_VER})
message(WARNING "USE_XNNPACK is set to OFF. XNNPACK requires CMake version ${XNNPACK_MIN_CMAKE_VER} or greater.")
endif()
option(USE_XNNPACK "Use XNNPACK" ON)
option(USE_ZMQ "Use ZMQ" OFF)
option(USE_ZSTD "Use ZSTD" OFF)
option(TORCH_DISABLE_GPU_ASSERTS "Disable GPU asserts by default" OFF)
@@ -724,16 +714,6 @@ if(USE_FLASH_ATTENTION)
ENDIF()
if(USE_CUDA AND (CMAKE_CUDA_COMPILER_VERSION VERSION_LESS 10.2) AND (CMAKE_HOST_SYSTEM_NAME MATCHES "Windows"))
# CUDA < 10.2 doesn't support compiling and extracting header dependencies in
# one call, so instead CMake calls nvcc twice with && in between.
# However, on windows cmd.exe has a 8191 character limit for commands which we
# start hitting. This moves most argments into a file to avoid going over the limit
set(CMAKE_CUDA_USE_RESPONSE_FILE_FOR_OBJECTS ON)
set(CMAKE_NINJA_FORCE_RESPONSE_FILE ON CACHE INTERNAL "")
endif()
if(USE_FBGEMM)
string(APPEND CMAKE_CXX_FLAGS " -DUSE_FBGEMM")
endif()
@@ -837,10 +817,6 @@ if(NOT MSVC)
string(APPEND CMAKE_CXX_FLAGS " -Wno-range-loop-analysis")
string(APPEND CMAKE_CXX_FLAGS " -Wno-pass-failed")
endif()
if(CMAKE_COMPILER_IS_GNUCXX AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6.0.0))
# Suppress issue: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43407
string(APPEND CMAKE_CXX_FLAGS " -Wno-attributes")
endif()
if(CMAKE_COMPILER_IS_GNUCXX AND NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0.0))
string(APPEND CMAKE_CXX_FLAGS " -Wno-stringop-overflow")
endif()

View File

@@ -156,12 +156,12 @@ They require JetPack 4.2 and above, and [@dusty-nv](https://github.com/dusty-nv)
#### Prerequisites
If you are installing from source, you will need:
- Python 3.7 or later (for Linux, Python 3.7.6+ or 3.8.1+ is needed)
- A C++14 compatible compiler, such as clang
- A C++17 compatible compiler, such as clang
We highly recommend installing an [Anaconda](https://www.anaconda.com/distribution/#download-section) environment. You will get a high-quality BLAS library (MKL) and you get controlled dependency versions regardless of your Linux distro.
If you want to compile with CUDA support, install the following (note that CUDA is not supported on macOS)
- [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads) 10.2 or above
- [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads) 11.0 or above
- [NVIDIA cuDNN](https://developer.nvidia.com/cudnn) v7 or above
- [Compiler](https://gist.github.com/ax3l/9489132) compatible with CUDA

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake ${CMAKE_MODULE_PATH})
if(NOT MSVC)

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
project(c10 CXX)
set(CMAKE_CXX_STANDARD 17 CACHE STRING "The C++ standard whose features are requested to build this target.")

View File

@@ -836,13 +836,9 @@ endif()
# Pass path to PocketFFT
if(AT_POCKETFFT_ENABLED)
if(CMAKE_VERSION VERSION_LESS "3.11")
target_include_directories(torch_cpu PRIVATE "${POCKETFFT_INCLUDE_DIR}")
else()
set_source_files_properties(
"${PROJECT_SOURCE_DIR}/aten/src/ATen/native/mkl/SpectralOps.cpp"
PROPERTIES INCLUDE_DIRECTORIES "${POCKETFFT_INCLUDE_DIR}")
endif()
set_source_files_properties(
"${PROJECT_SOURCE_DIR}/aten/src/ATen/native/mkl/SpectralOps.cpp"
PROPERTIES INCLUDE_DIRECTORIES "${POCKETFFT_INCLUDE_DIR}")
endif()
if(CMAKE_COMPILER_IS_GNUCXX AND BUILD_LIBTORCH_CPU_WITH_DEBUG)

View File

@@ -1634,10 +1634,8 @@ function(add_onnx_tensorrt_subdir)
set(CUDNN_INCLUDE_DIR "${CUDNN_INCLUDE_PATH}")
set(CUDNN_LIBRARY "${CUDNN_LIBRARY_PATH}")
set(CMAKE_VERSION_ORIG "${CMAKE_VERSION}")
if(FIND_CUDA_MODULE_DEPRECATED)
# TODO: this WAR is for https://github.com/pytorch/pytorch/issues/18524
set(CMAKE_VERSION "3.9.0")
endif()
# TODO: this WAR is for https://github.com/pytorch/pytorch/issues/18524
set(CMAKE_VERSION "3.9.0")
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/onnx-tensorrt EXCLUDE_FROM_ALL)
set(CMAKE_VERSION "${CMAKE_VERSION_ORIG}")
endfunction()
@@ -1683,32 +1681,20 @@ if(NOT INTERN_BUILD_MOBILE)
string(APPEND CMAKE_CUDA_FLAGS " -Xcompiler=/wd4819,/wd4503,/wd4190,/wd4244,/wd4251,/wd4275,/wd4522")
endif()
if(NOT MSVC)
set(CMAKE_C_STANDARD 11 CACHE STRING "The C standard whose features are requested to build this target.")
endif()
string(APPEND CMAKE_CUDA_FLAGS " -Wno-deprecated-gpu-targets --expt-extended-lambda")
if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_CXX_STANDARD 17 CACHE STRING "The C++ standard whose features are requested to build this target.")
endif()
# use cub in a safe manner, see:
# https://github.com/pytorch/pytorch/pull/55292
if(NOT ${CUDA_VERSION} LESS 11.5)
string(APPEND CMAKE_CUDA_FLAGS " -DCUB_WRAPPED_NAMESPACE=at_cuda_detail")
endif()
if(CUDA_HAS_FP16 OR NOT ${CUDA_VERSION} LESS 7.5)
message(STATUS "Found CUDA with FP16 support, compiling with torch.cuda.HalfTensor")
string(APPEND CMAKE_CUDA_FLAGS " -DCUDA_HAS_FP16=1"
" -D__CUDA_NO_HALF_OPERATORS__"
" -D__CUDA_NO_HALF_CONVERSIONS__"
" -D__CUDA_NO_HALF2_OPERATORS__"
" -D__CUDA_NO_BFLOAT16_CONVERSIONS__")
else()
message(STATUS "Could not find CUDA with FP16 support, compiling without torch.CudaHalfTensor")
endif()
message(STATUS "Found CUDA with FP16 support, compiling with torch.cuda.HalfTensor")
string(APPEND CMAKE_CUDA_FLAGS " -DCUDA_HAS_FP16=1"
" -D__CUDA_NO_HALF_OPERATORS__"
" -D__CUDA_NO_HALF_CONVERSIONS__"
" -D__CUDA_NO_HALF2_OPERATORS__"
" -D__CUDA_NO_BFLOAT16_CONVERSIONS__")
string(APPEND CMAKE_C_FLAGS_RELEASE " -DNDEBUG")
string(APPEND CMAKE_CXX_FLAGS_RELEASE " -DNDEBUG")

View File

@@ -41,57 +41,7 @@ if(NOT INTERN_BUILD_MOBILE)
cmake_pop_check_state()
endif()
if(NOT INTERN_BUILD_MOBILE)
# ---[ Check if certain std functions are supported. Sometimes
# _GLIBCXX_USE_C99 macro is not defined and some functions are missing.
cmake_push_check_state(RESET)
set(CMAKE_REQUIRED_FLAGS "-std=c++14")
CHECK_CXX_SOURCE_COMPILES("
#include <cmath>
#include <string>
int main() {
int a = std::isinf(3.0);
int b = std::isnan(0.0);
std::string s = std::to_string(1);
return 0;
}" SUPPORT_GLIBCXX_USE_C99)
if(NOT SUPPORT_GLIBCXX_USE_C99)
# Force cmake to retest next time around
unset(SUPPORT_GLIBCXX_USE_C99 CACHE)
message(FATAL_ERROR
"The C++ compiler does not support required functions. "
"This is very likely due to a known bug in GCC 5 "
"(and maybe other versions) on Ubuntu 17.10 and newer. "
"For more information, see: "
"https://github.com/pytorch/pytorch/issues/5229")
endif()
cmake_pop_check_state()
endif()
# ---[ Check if std::exception_ptr is supported.
cmake_push_check_state(RESET)
set(CMAKE_REQUIRED_FLAGS "-std=c++14")
CHECK_CXX_SOURCE_COMPILES(
"#include <string>
#include <exception>
int main(int argc, char** argv) {
std::exception_ptr eptr;
try {
std::string().at(1);
} catch(...) {
eptr = std::current_exception();
}
}" CAFFE2_EXCEPTION_PTR_SUPPORTED)
if(CAFFE2_EXCEPTION_PTR_SUPPORTED)
message(STATUS "std::exception_ptr is supported.")
set(CAFFE2_USE_EXCEPTION_PTR 1)
else()
message(STATUS "std::exception_ptr is NOT supported.")
endif()
cmake_pop_check_state()
set(CAFFE2_USE_EXCEPTION_PTR 1)
# ---[ Check if we want to turn off deprecated warning due to glog.
# Note(jiayq): on ubuntu 14.04, the default glog install uses ext/hash_set that

View File

@@ -181,7 +181,7 @@ if(NOT @BUILD_SHARED_LIBS@)
endif()
set_target_properties(torch PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${TORCH_INCLUDE_DIRS}"
CXX_STANDARD 14
CXX_STANDARD 17
)
if(TORCH_CXX_FLAGS)
set_property(TARGET torch PROPERTY INTERFACE_COMPILE_OPTIONS "${TORCH_CXX_FLAGS}")

View File

@@ -51,8 +51,8 @@ set(CMAKE_CUDA_STANDARD_REQUIRED ON)
message(STATUS "Caffe2: CUDA detected: " ${CUDA_VERSION})
message(STATUS "Caffe2: CUDA nvcc is: " ${CUDA_NVCC_EXECUTABLE})
message(STATUS "Caffe2: CUDA toolkit directory: " ${CUDA_TOOLKIT_ROOT_DIR})
if(CUDA_VERSION VERSION_LESS 10.2)
message(FATAL_ERROR "PyTorch requires CUDA 10.2 or above.")
if(CUDA_VERSION VERSION_LESS 11.0)
message(FATAL_ERROR "PyTorch requires CUDA 11.0 or above.")
endif()
if(CUDA_FOUND)

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.12)
cmake_minimum_required(VERSION 3.18)
project(functorch)
set(CMAKE_CXX_STANDARD 17)

View File

@@ -2,7 +2,7 @@
# Now it only builds the Torch python bindings.
if(NOT CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO)
cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
project(torch CXX C)
find_package(torch REQUIRED)
option(USE_CUDA "Use CUDA" ON)

View File

@@ -1,5 +1,5 @@
project(libshm C CXX)
cmake_minimum_required(VERSION 3.13 FATAL_ERROR)
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
set(TORCH_ROOT ${CMAKE_CURRENT_LIST_DIR}/../../../)
include(${TORCH_ROOT}/cmake/public/threads.cmake)

View File

@@ -50,7 +50,6 @@ VersionMap = Dict[str, VersionRange]
# Or from include/crt/host_config.h in the CUDA SDK
# The second value is the exclusive(!) upper bound, i.e. min <= version < max
CUDA_GCC_VERSIONS: VersionMap = {
'10.2': (MINIMUM_GCC_VERSION, (9, 0)),
'11.0': (MINIMUM_GCC_VERSION, (10, 0)),
'11.1': (MINIMUM_GCC_VERSION, (11, 0)),
'11.2': (MINIMUM_GCC_VERSION, (11, 0)),
@@ -63,7 +62,6 @@ CUDA_GCC_VERSIONS: VersionMap = {
MINIMUM_CLANG_VERSION = (3, 3, 0)
CUDA_CLANG_VERSIONS: VersionMap = {
'10.2': (MINIMUM_CLANG_VERSION, (9, 0)),
'11.1': (MINIMUM_CLANG_VERSION, (11, 0)),
'11.2': (MINIMUM_CLANG_VERSION, (12, 0)),
'11.3': (MINIMUM_CLANG_VERSION, (12, 0)),
@@ -2147,7 +2145,7 @@ def _write_ninja_file(path,
# --generate-dependencies-with-compile was added in CUDA 10.2.
# Compilation will work on earlier CUDA versions but header file
# dependencies are not correctly computed.
required_cuda_version = packaging.version.parse('10.2')
required_cuda_version = packaging.version.parse('11.0')
has_cuda_version = torch.version.cuda is not None
if has_cuda_version and packaging.version.parse(torch.version.cuda) >= required_cuda_version:
cuda_compile_rule.append(' depfile = $out.d')