Replace is_same with is_same_v for concise syntax (#145450)
Replace `std::is_same<T, U>::value` with `std::is_same_v<T, U>` for a more concise syntax that is consistent with the rest of the codebase.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/145450
Approved by: https://github.com/huydhn
Parent: c159723c39
Commit: 6ca497a8e5
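The change is mechanical: since C++17, `std::is_same_v<T, U>` is a variable template defined as `std::is_same<T, U>::value`, so both spellings evaluate to the same `constexpr bool` and each hunk below preserves behavior. A minimal standalone sketch of the equivalence (the `is_float_like` helper is hypothetical, for illustration only, not taken from the patch):

    #include <type_traits>

    // Hypothetical helper in the style of this patch: true for float or double.
    template <typename T>
    constexpr bool is_float_like() {
      return std::is_same_v<T, float> || std::is_same_v<T, double>;
    }

    // The pre-C++17 spelling and the C++17 variable template give identical results.
    static_assert(std::is_same<int, int>::value, "old spelling");
    static_assert(std::is_same_v<int, int>, "new spelling");

    static_assert(is_float_like<double>(), "double qualifies");
    static_assert(!is_float_like<int>(), "int does not");

    int main() { return 0; }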
@@ -47,7 +47,7 @@ List<T>::List(TypePtr elementType)
   : List(make_intrusive<c10::detail::ListImpl>(
       typename c10::detail::ListImpl::list_type(),
       std::move(elementType))) {
-  static_assert(std::is_same_v<T, IValue> || std::is_same<T, c10::intrusive_ptr<ivalue::Future>>::value,
+  static_assert(std::is_same_v<T, IValue> || std::is_same_v<T, c10::intrusive_ptr<ivalue::Future>>,
       "This constructor is only valid for c10::impl::GenericList or List<Future>.");
 }

@@ -38,8 +38,8 @@ constexpr bool is_zarch_implemented_quant() {

 template <typename T>
 constexpr bool is_zarch_implemented_complex() {
-  return std::is_same<T, c10::complex<float>>::value ||
-      std::is_same<T, c10::complex<double>>::value;
+  return std::is_same_v<T, c10::complex<float>> ||
+      std::is_same_v<T, c10::complex<double>>;
 }

 constexpr int offset0 = 0;

@@ -478,7 +478,7 @@ constexpr int block_threads(){

 template<typename scalar_t, typename ScanOpT>
 inline void inclusive_deterministic_scan(const scalar_t * input, scalar_t * output, ScanOpT scan_op, int64_t num_items) {
-  static_assert(std::is_same<ScanOpT, std::plus<scalar_t>>::value, "");
+  static_assert(std::is_same_v<ScanOpT, std::plus<scalar_t>>, "");
   constexpr int BLOCK_THREADS = block_threads<sizeof(scalar_t)>();
   constexpr int ITEMS_PER_THREAD = 16;
   auto grid_size = (num_items + BLOCK_THREADS * ITEMS_PER_THREAD - 1) / (BLOCK_THREADS * ITEMS_PER_THREAD);

@@ -451,7 +451,7 @@ void scan_dim(const TensorBase& self, const TensorBase& result,
   TORCH_INTERNAL_ASSERT(result.is_contiguous());

   if (self.numel() == self.size(dim)) {
-    if constexpr (std::is_same<BinaryFunction, std::plus<scalar_t>>::value) {
+    if constexpr (std::is_same_v<BinaryFunction, std::plus<scalar_t>>) {
       if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms()) && (self.is_floating_point() || self.is_complex())) {
 #if (defined(CUDA_VERSION) && CUDA_VERSION > 11040) || defined(USE_ROCM)
         cuda::cub::inclusive_deterministic_scan(self_->const_data_ptr<scalar_t>(), result.mutable_data_ptr<scalar_t>(), binary_op, self.numel());

@@ -211,8 +211,8 @@ struct CusparseMatrixMultiplyOp {
         std::is_same_v<c10::BFloat16, scalar_t> ||
         std::is_same_v<float, scalar_t> ||
         std::is_same_v<double, scalar_t> ||
-        std::is_same<c10::complex<float>, scalar_t>::value ||
-        std::is_same<c10::complex<double>, scalar_t>::value,
+        std::is_same_v<c10::complex<float>, scalar_t> ||
+        std::is_same_v<c10::complex<double>, scalar_t>,
         "cusparseSpGEMM only supports data type of half, bfloat16, float, double and complex float, double.");
     // SpGEMM Computation
     TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&spgemmDesc));

@@ -673,8 +673,8 @@ void sparse_sparse_matmul_cuda_kernel(
       std::is_same_v<c10::BFloat16, scalar_t> ||
       std::is_same_v<float, scalar_t> ||
       std::is_same_v<double, scalar_t> ||
-      std::is_same<c10::complex<float>, scalar_t>::value ||
-      std::is_same<c10::complex<double>, scalar_t>::value,
+      std::is_same_v<c10::complex<float>, scalar_t> ||
+      std::is_same_v<c10::complex<double>, scalar_t>,
       "sparse_sparse_matmul_cuda_kernel only supports data type of half, bfloat16, float, double and complex float, double.");

   // older versions of cusparse on Windows segfault for complex128 dtype

@@ -582,7 +582,7 @@ class TORCH_API Tensor: public TensorBase {
   template <typename T>
   using hook_return_void_t = std::enable_if_t<std::is_void<typename std::invoke_result_t<T&, Tensor>>::value, unsigned>;
   template <typename T>
-  using hook_return_var_t = std::enable_if_t<std::is_same<typename std::invoke_result_t<T&, Tensor>, Tensor>::value, unsigned>;
+  using hook_return_var_t = std::enable_if_t<std::is_same_v<typename std::invoke_result_t<T&, Tensor>, Tensor>, unsigned>;

   /// Registers a backward hook.
   ///
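The last hunk touches a SFINAE alias: `hook_return_var_t` only names a type when the hook, invoked with a `Tensor`, returns a `Tensor`. A minimal sketch of that pattern using the same `is_same_v` spelling (the `Tensor` stand-in and `register_hook` below are illustrative, not PyTorch's actual API):

    #include <type_traits>

    // Illustrative stand-in, not the real PyTorch type.
    struct Tensor {};

    // Alias exists only when F invoked with a Tensor returns a Tensor,
    // mirroring the hook_return_var_t alias in the hunk above.
    template <typename F>
    using returns_tensor_t =
        std::enable_if_t<std::is_same_v<std::invoke_result_t<F&, Tensor>, Tensor>, unsigned>;

    // Overload participates in resolution only for Tensor-returning hooks.
    template <typename F>
    returns_tensor_t<F> register_hook(F&&) { return 0u; }

    int main() {
      auto hook = [](Tensor t) { return t; };  // returns Tensor, so this compiles
      return static_cast<int>(register_hook(hook));
    }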