Replace is_same with is_same_v for concise syntax (#145450)

Replace `std::is_same<T, U>::value` with `std::is_same_v` for concise and consistent syntax with other code.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/145450
Approved by: https://github.com/huydhn
This commit is contained in:
zeshengzong 2025-02-13 03:29:39 +00:00 committed by PyTorch MergeBot
parent c159723c39
commit 6ca497a8e5
6 changed files with 10 additions and 10 deletions

View File

@@ -47,7 +47,7 @@ List<T>::List(TypePtr elementType)
 : List(make_intrusive<c10::detail::ListImpl>(
 typename c10::detail::ListImpl::list_type(),
 std::move(elementType))) {
-static_assert(std::is_same_v<T, IValue> || std::is_same<T, c10::intrusive_ptr<ivalue::Future>>::value,
+static_assert(std::is_same_v<T, IValue> || std::is_same_v<T, c10::intrusive_ptr<ivalue::Future>>,
 "This constructor is only valid for c10::impl::GenericList or List<Future>.");
 }

View File

@@ -38,8 +38,8 @@ constexpr bool is_zarch_implemented_quant() {
 template <typename T>
 constexpr bool is_zarch_implemented_complex() {
-return std::is_same<T, c10::complex<float>>::value ||
-std::is_same<T, c10::complex<double>>::value;
+return std::is_same_v<T, c10::complex<float>> ||
+std::is_same_v<T, c10::complex<double>>;
 }
 constexpr int offset0 = 0;

View File

@@ -478,7 +478,7 @@ constexpr int block_threads(){
 template<typename scalar_t, typename ScanOpT>
 inline void inclusive_deterministic_scan(const scalar_t * input, scalar_t * output, ScanOpT scan_op, int64_t num_items) {
-static_assert(std::is_same<ScanOpT, std::plus<scalar_t>>::value, "");
+static_assert(std::is_same_v<ScanOpT, std::plus<scalar_t>>, "");
 constexpr int BLOCK_THREADS = block_threads<sizeof(scalar_t)>();
 constexpr int ITEMS_PER_THREAD = 16;
 auto grid_size = (num_items + BLOCK_THREADS * ITEMS_PER_THREAD - 1) / (BLOCK_THREADS * ITEMS_PER_THREAD);

View File

@@ -451,7 +451,7 @@ void scan_dim(const TensorBase& self, const TensorBase& result,
 TORCH_INTERNAL_ASSERT(result.is_contiguous());
 if (self.numel() == self.size(dim)) {
-if constexpr (std::is_same<BinaryFunction, std::plus<scalar_t>>::value) {
+if constexpr (std::is_same_v<BinaryFunction, std::plus<scalar_t>>) {
 if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms()) && (self.is_floating_point() || self.is_complex())) {
 # if (defined(CUDA_VERSION) && CUDA_VERSION > 11040) || defined(USE_ROCM)
 cuda::cub::inclusive_deterministic_scan(self_->const_data_ptr<scalar_t>(), result.mutable_data_ptr<scalar_t>(), binary_op, self.numel());

View File

@@ -211,8 +211,8 @@ struct CusparseMatrixMultiplyOp {
 std::is_same_v<c10::BFloat16, scalar_t> ||
 std::is_same_v<float, scalar_t> ||
 std::is_same_v<double, scalar_t> ||
-std::is_same<c10::complex<float>, scalar_t>::value ||
-std::is_same<c10::complex<double>, scalar_t>::value,
+std::is_same_v<c10::complex<float>, scalar_t> ||
+std::is_same_v<c10::complex<double>, scalar_t>,
 "cusparseSpGEMM only supports data type of half, bfloat16, float, double and complex float, double.");
 // SpGEMM Computation
 TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&spgemmDesc));
@@ -673,8 +673,8 @@ void sparse_sparse_matmul_cuda_kernel(
 std::is_same_v<c10::BFloat16, scalar_t> ||
 std::is_same_v<float, scalar_t> ||
 std::is_same_v<double, scalar_t> ||
-std::is_same<c10::complex<float>, scalar_t>::value ||
-std::is_same<c10::complex<double>, scalar_t>::value,
+std::is_same_v<c10::complex<float>, scalar_t> ||
+std::is_same_v<c10::complex<double>, scalar_t>,
 "sparse_sparse_matmul_cuda_kernel only supports data type of half, bfloat16, float, double and complex float, double.");
 // older versions of cusparse on Windows segfault for complex128 dtype

View File

@@ -582,7 +582,7 @@ class TORCH_API Tensor: public TensorBase {
 template <typename T>
 using hook_return_void_t = std::enable_if_t<std::is_void<typename std::invoke_result_t<T&, Tensor>>::value, unsigned>;
 template <typename T>
-using hook_return_var_t = std::enable_if_t<std::is_same<typename std::invoke_result_t<T&, Tensor>, Tensor>::value, unsigned>;
+using hook_return_var_t = std::enable_if_t<std::is_same_v<typename std::invoke_result_t<T&, Tensor>, Tensor>, unsigned>;
 /// Registers a backward hook.
 ///