diff --git a/aten/src/ATen/core/List_inl.h b/aten/src/ATen/core/List_inl.h
index 3e61fa24ee0..96f78faea22 100644
--- a/aten/src/ATen/core/List_inl.h
+++ b/aten/src/ATen/core/List_inl.h
@@ -47,7 +47,7 @@ List<T>::List(TypePtr elementType)
 : List(make_intrusive<c10::detail::ListImpl>(
     typename c10::detail::ListImpl::list_type(),
     std::move(elementType))) {
-  static_assert(std::is_same_v<T, IValue> || std::is_same<T, c10::intrusive_ptr<ivalue::Future>>::value,
+  static_assert(std::is_same_v<T, IValue> || std::is_same_v<T, c10::intrusive_ptr<ivalue::Future>>,
                 "This constructor is only valid for c10::impl::GenericList or List<Future>.");
 }
 
diff --git a/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h b/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h
index c23f2e03381..7c2932b3aab 100644
--- a/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h
+++ b/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h
@@ -38,8 +38,8 @@ constexpr bool is_zarch_implemented_quant() {
 
 template <typename T>
 constexpr bool is_zarch_implemented_complex() {
-  return std::is_same<T, c10::complex<float>>::value ||
-      std::is_same<T, c10::complex<double>>::value;
+  return std::is_same_v<T, c10::complex<float>> ||
+      std::is_same_v<T, c10::complex<double>>;
 }
 
 constexpr int offset0 = 0;
diff --git a/aten/src/ATen/cuda/cub.cuh b/aten/src/ATen/cuda/cub.cuh
index 8450c05c5d7..a1a7ab70630 100644
--- a/aten/src/ATen/cuda/cub.cuh
+++ b/aten/src/ATen/cuda/cub.cuh
@@ -478,7 +478,7 @@ constexpr int block_threads(){
 
 template<typename scalar_t, typename ScanOpT>
 inline void inclusive_deterministic_scan(const scalar_t * input, scalar_t * output, ScanOpT scan_op, int64_t num_items) {
-  static_assert(std::is_same<ScanOpT, std::plus<scalar_t>>::value, "");
+  static_assert(std::is_same_v<ScanOpT, std::plus<scalar_t>>, "");
   constexpr int BLOCK_THREADS = block_threads<scalar_t>();
   constexpr int ITEMS_PER_THREAD = 16;
   auto grid_size = (num_items + BLOCK_THREADS * ITEMS_PER_THREAD - 1) / (BLOCK_THREADS * ITEMS_PER_THREAD);
diff --git a/aten/src/ATen/native/cuda/ScanUtils.cuh b/aten/src/ATen/native/cuda/ScanUtils.cuh
index 6746a16dd75..1bb64473009 100644
--- a/aten/src/ATen/native/cuda/ScanUtils.cuh
+++ b/aten/src/ATen/native/cuda/ScanUtils.cuh
@@ -451,7 +451,7 @@ void scan_dim(const TensorBase& self, const TensorBase& result,
   TORCH_INTERNAL_ASSERT(result.is_contiguous());
 
   if (self.numel() == self.size(dim)) {
-    if constexpr (std::is_same<BinaryFunction, std::plus<scalar_t>>::value) {
+    if constexpr (std::is_same_v<BinaryFunction, std::plus<scalar_t>>) {
       if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms()) && (self.is_floating_point() || self.is_complex())) {
 # if (defined(CUDA_VERSION) && CUDA_VERSION > 11040) || defined(USE_ROCM)
         cuda::cub::inclusive_deterministic_scan(self_->const_data_ptr<scalar_t>(), result.mutable_data_ptr<scalar_t>(), binary_op, self.numel());
diff --git a/aten/src/ATen/native/sparse/cuda/SparseMatMul.cu b/aten/src/ATen/native/sparse/cuda/SparseMatMul.cu
index 1fa25dad02d..23fcd26cf41 100644
--- a/aten/src/ATen/native/sparse/cuda/SparseMatMul.cu
+++ b/aten/src/ATen/native/sparse/cuda/SparseMatMul.cu
@@ -211,8 +211,8 @@ struct CusparseMatrixMultiplyOp {
         std::is_same_v<c10::BFloat16, scalar_t> ||
         std::is_same_v<float, scalar_t> ||
         std::is_same_v<double, scalar_t> ||
-        std::is_same<c10::complex<float>, scalar_t>::value ||
-        std::is_same<c10::complex<double>, scalar_t>::value,
+        std::is_same_v<c10::complex<float>, scalar_t> ||
+        std::is_same_v<c10::complex<double>, scalar_t>,
         "cusparseSpGEMM only supports data type of half, bfloat16, float, double and complex float, double.");
     // SpGEMM Computation
     TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&spgemmDesc));
@@ -673,8 +673,8 @@ void sparse_sparse_matmul_cuda_kernel(
       std::is_same_v<c10::BFloat16, scalar_t> ||
       std::is_same_v<float, scalar_t> ||
       std::is_same_v<double, scalar_t> ||
-      std::is_same<c10::complex<float>, scalar_t>::value ||
-      std::is_same<c10::complex<double>, scalar_t>::value,
+      std::is_same_v<c10::complex<float>, scalar_t> ||
+      std::is_same_v<c10::complex<double>, scalar_t>,
       "sparse_sparse_matmul_cuda_kernel only supports data type of half, bfloat16, float, double and complex float, double.");
 
   // older versions of cusparse on Windows segfault for complex128 dtype
diff --git a/aten/src/ATen/templates/TensorBody.h b/aten/src/ATen/templates/TensorBody.h
index 7956ffb6aef..050d882f42b 100644
--- a/aten/src/ATen/templates/TensorBody.h
+++ b/aten/src/ATen/templates/TensorBody.h
@@ -582,7 +582,7 @@ class TORCH_API Tensor: public TensorBase {
   template <typename T>
   using hook_return_void_t = std::enable_if_t<std::is_void<typename c10::invoke_result_t<T&, Tensor>>::value, unsigned>;
   template <typename T>
-  using hook_return_var_t = std::enable_if_t<std::is_same<typename c10::invoke_result_t<T&, Tensor>, Tensor>::value, unsigned>;
+  using hook_return_var_t = std::enable_if_t<std::is_same_v<typename c10::invoke_result_t<T&, Tensor>, Tensor>, unsigned>;
 
   /// Registers a backward hook.
   ///
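
Note for reviewers: every change above is the same purely syntactic rewrite. `std::is_same_v<T, U>` is the C++17 variable template defined as `std::is_same<T, U>::value`, so old and new spellings produce identical compile-time constants. A minimal standalone sketch of the equivalence (the `is_float_like` helpers are illustrative only, not part of the patch):

#include <type_traits>

// Pre-C++17 spelling: instantiate the trait class, then read ::value.
template <typename T>
constexpr bool is_float_like_old = std::is_same<T, float>::value ||
    std::is_same<T, double>::value;

// C++17 spelling used after this patch: the _v variable template.
template <typename T>
constexpr bool is_float_like = std::is_same_v<T, float> ||
    std::is_same_v<T, double>;

// Both spellings yield the same compile-time constant.
static_assert(is_float_like_old<double> == is_float_like<double>);
static_assert(is_float_like<float> && !is_float_like<int>);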