Remove C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA and CONSTEXPR_EXCEPT_WIN_CUDA (#138479)

The BC linter is suppressed because this change removes `tools/linter/adapters/constexpr_linter.py`.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/138479
Approved by: https://github.com/eqy, https://github.com/malfet
Author: Richard Barnes
Committed by: PyTorch MergeBot on 2024-10-24 07:51:02 +00:00
Parent: 96b30dcb25
Commit: dbf0fa811a
61 changed files with 109 additions and 272 deletions
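
The change is mechanical throughout the diff below: every kernel-name string that previously used the macro becomes a plain constexpr array, and the supporting macros themselves are deleted from c10/macros/Macros.h. For example, taken from the abs kernel below:

// Before: guarded because older Windows NVCC rejected constexpr char arrays here
CONSTEXPR_EXCEPT_WIN_CUDA char abs_name[] = "abs_kernel";
// After: per the note removed from Macros.h, the underlying compiler issue has since been fixed
constexpr char abs_name[] = "abs_kernel";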

View File

@ -376,17 +376,6 @@ command = [
]
is_formatter = true
[[linter]]
code = 'CONSTEXPR'
include_patterns=['aten/src/ATen/native/cuda/*.cu']
command = [
'python3',
'tools/linter/adapters/constexpr_linter.py',
'--',
'@{{PATHSFILE}}',
]
is_formatter = true
[[linter]]
code = 'SPACES'
include_patterns = ['**']

View File

@ -15,7 +15,7 @@ struct AbsFunctor {
}
};
CONSTEXPR_EXCEPT_WIN_CUDA char abs_name[] = "abs_kernel";
constexpr char abs_name[] = "abs_kernel";
void abs_kernel_cuda(TensorIteratorBase& iter) {
auto dtype = iter.dtype();
if (at::isComplexType(dtype)) {

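These name arrays exist for the jiterator: the string is passed as a non-type template argument, which is why it has to be a constexpr character array with linkage rather than a local string. A rough sketch of how abs_name is consumed, abridged from the surrounding kernel (the exact template parameter list is approximate):

#if AT_USE_JITERATOR()
    static const auto abs_string = jiterator_stringify(
        template <typename T> T abs_kernel(T v) { return std::abs(v); });
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "abs_cuda", [&]() {
      // abs_name is the non-type template argument naming the jitted kernel
      jitted_gpu_kernel</*name=*/abs_name, /*return_dtype=*/scalar_t,
                        /*common_dtype=*/scalar_t, /*arity=*/1>(iter, abs_string);
    });
#endif
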
View File

@ -16,7 +16,7 @@
namespace at::native {
namespace binary_internal {
CONSTEXPR_EXCEPT_WIN_CUDA char div_name[] = "div_kernel";
constexpr char div_name[] = "div_kernel";
void div_true_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (iter.common_dtype() == kComplexHalf) {

View File

@ -11,7 +11,7 @@
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char logical_and_name[] = "logical_and_kernel";
constexpr char logical_and_name[] = "logical_and_kernel";
void logical_and_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
@ -48,7 +48,7 @@ void logical_and_kernel_cuda(TensorIterator& iter) {
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char logical_or_name[] = "logical_or_kernel";
constexpr char logical_or_name[] = "logical_or_kernel";
void logical_or_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
@ -84,7 +84,7 @@ void logical_or_kernel_cuda(TensorIterator& iter) {
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char logical_xor_name[] = "logical_xor_kernel";
constexpr char logical_xor_name[] = "logical_xor_kernel";
void logical_xor_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {

View File

@ -15,7 +15,7 @@
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char sigmoid_backward_name[] = "sigmoid_backward";
constexpr char sigmoid_backward_name[] = "sigmoid_backward";
void sigmoid_backward_kernel_cuda(TensorIteratorBase& iter) {
auto dtype = iter.dtype();
if(isComplexType(dtype)) {
@ -86,7 +86,7 @@ void logit_backward_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scal
});
}
CONSTEXPR_EXCEPT_WIN_CUDA char tanh_backward_name[] = "tanh_backward";
constexpr char tanh_backward_name[] = "tanh_backward";
void tanh_backward_kernel_cuda(TensorIteratorBase& iter) {
auto dtype = iter.dtype();
if(isComplexType(dtype)) {

View File

@ -18,7 +18,7 @@
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char mul_name[] = "mul_kernel";
constexpr char mul_name[] = "mul_kernel";
void mul_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (common_dtype == kComplexHalf) {

View File

@ -14,7 +14,7 @@
namespace at::native {
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char gcd_name[] = "gcd";
constexpr char gcd_name[] = "gcd";
void gcd_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_cuda", [&]() {
@ -33,7 +33,7 @@ void gcd_kernel_cuda(TensorIteratorBase& iter) {
}
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char lcm_name[] = "lcm";
constexpr char lcm_name[] = "lcm";
void lcm_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "lcm_cuda", [&]() {

View File

@ -9,7 +9,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char lerp_tensor_name[] = "lerp_tensor";
constexpr char lerp_tensor_name[] = "lerp_tensor";
void lerp_tensor_kernel(at::TensorIteratorBase& iter) {
auto dtype = iter.common_dtype();
if(at::isComplexType(dtype)) {
@ -63,7 +63,7 @@ void lerp_tensor_kernel(at::TensorIteratorBase& iter) {
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char lerp_scalar_name[] = "lerp_scalar";
constexpr char lerp_scalar_name[] = "lerp_scalar";
void lerp_scalar_kernel(at::TensorIteratorBase& iter, const c10::Scalar& weight) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {

View File

@ -12,7 +12,7 @@
namespace at::native {
#if AT_USE_JITERATOR() && CUDA_VERSION >= 11050
CONSTEXPR_EXCEPT_WIN_CUDA char addcmul_name[] = "addcmul";
constexpr char addcmul_name[] = "addcmul";
#endif
void addcmul_cuda_kernel(TensorIteratorBase& iter, const Scalar& value) {
auto dtype = iter.common_dtype();
@ -59,7 +59,7 @@ void addcmul_cuda_kernel(TensorIteratorBase& iter, const Scalar& value) {
#if AT_USE_JITERATOR() && CUDA_VERSION >= 11050
// return a + alpha * (b / static_cast<accscalar_t>(c));
CONSTEXPR_EXCEPT_WIN_CUDA char addcdiv_name[] = "addcdiv";
constexpr char addcdiv_name[] = "addcdiv";
#endif
void addcdiv_cuda_kernel(TensorIteratorBase& iter, const Scalar& value) {
auto dtype = iter.common_dtype();

View File

@ -38,7 +38,7 @@ void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<value_t> base
}
/* complex<Half> support impl */
CONSTEXPR_EXCEPT_WIN_CUDA char pow_scalar_base_name[] = "pow_scalar_base_kernel";
constexpr char pow_scalar_base_name[] = "pow_scalar_base_kernel";
template <>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<at::Half> base) {
using scalar_t = c10::complex<at::Half>;
@ -68,7 +68,7 @@ namespace {
#if AT_USE_JITERATOR()
/* complex<Half> support impl */
CONSTEXPR_EXCEPT_WIN_CUDA char pow_name[] = "pow_kernel";
constexpr char pow_name[] = "pow_kernel";
static const auto pow_kernel_string =
jiterator_stringify(template <typename T> T pow_kernel(T base, T exp) {
return std::pow(base, exp);

View File

@ -21,7 +21,7 @@ struct sum_functor {
};
// jiterated specialization for `complex<Half>`
CONSTEXPR_EXCEPT_WIN_CUDA char sum_name[] = "sum";
constexpr char sum_name[] = "sum";
template <>
struct sum_functor<c10::complex<at::Half>> {
// jiterator reduction fails on windows
@ -57,7 +57,7 @@ struct nansum_functor {
}
};
CONSTEXPR_EXCEPT_WIN_CUDA char nansum_name[] = "nansum";
constexpr char nansum_name[] = "nansum";
template <typename scalar_t>
struct nansum_functor_complex {
#if AT_USE_JITERATOR()
@ -79,7 +79,7 @@ struct nansum_functor_complex {
#endif
};
CONSTEXPR_EXCEPT_WIN_CUDA char prod_name[] = "prod";
constexpr char prod_name[] = "prod";
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = scalar_t>
struct prod_functor {
// jiterator reduction fails on windows

View File

@ -26,7 +26,7 @@ __host__ __device__ static inline c10::complex<T> angle_wrapper(c10::complex<T>
}
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char angle_name[] = "angle_kernel";
constexpr char angle_name[] = "angle_kernel";
#endif
void angle_kernel_cuda(TensorIteratorBase& iter) {
@ -63,7 +63,7 @@ void angle_kernel_cuda(TensorIteratorBase& iter) {
}
// NB: Ignores the negative bit on tensors
CONSTEXPR_EXCEPT_WIN_CUDA char conj_name[] = "conj_kernel";
constexpr char conj_name[] = "conj_kernel";
void conj_kernel_cuda(TensorIteratorBase& iter) {
auto conj_chalf = [&] {
using scalar_t = c10::complex<at::Half>;

View File

@ -13,7 +13,7 @@
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char digamma_name[] = "digamma";
constexpr char digamma_name[] = "digamma";
#endif // AT_USE_JITERATOR()
// See note [Jiterator]
void digamma_kernel_cuda(TensorIteratorBase& iter) {
@ -40,7 +40,7 @@ void digamma_kernel_cuda(TensorIteratorBase& iter) {
}
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char trigamma_name[] = "trigamma";
constexpr char trigamma_name[] = "trigamma";
void trigamma_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(
@ -64,7 +64,7 @@ void trigamma_kernel_cuda(TensorIteratorBase& iter) {
#endif // AT_USE_JITERATOR()
}
CONSTEXPR_EXCEPT_WIN_CUDA char polygamma_name[] = "polygamma";
constexpr char polygamma_name[] = "polygamma";
void polygamma_kernel_cuda(TensorIteratorBase& iter, int64_t n) {
if (n == 0) {
digamma_kernel_cuda(iter);
@ -101,7 +101,7 @@ void polygamma_kernel_cuda(TensorIteratorBase& iter, int64_t n) {
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char lgamma_name[] = "lgamma_kernel";
constexpr char lgamma_name[] = "lgamma_kernel";
void lgamma_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(

View File

@ -13,7 +13,7 @@
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char acos_name[] = "acos_impl";
constexpr char acos_name[] = "acos_impl";
#endif
void acos_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();

View File

@ -13,7 +13,7 @@
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char acosh_name[] = "acosh_impl";
constexpr char acosh_name[] = "acosh_impl";
#endif
void acosh_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char asin_name[] = "asin_impl";
constexpr char asin_name[] = "asin_impl";
#endif
void asin_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char asinh_name[] = "asinh_impl";
constexpr char asinh_name[] = "asinh_impl";
#endif
void asinh_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char atan_name[] = "atan_impl";
constexpr char atan_name[] = "atan_impl";
#endif
void atan_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char atanh_name[] = "atanh_impl";
constexpr char atanh_name[] = "atanh_impl";
#endif
void atanh_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char cos_name[] = "cos_impl";
constexpr char cos_name[] = "cos_impl";
#endif // AT_USE_JITERATOR()
void cos_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char cosh_name[] = "cosh_impl";
constexpr char cosh_name[] = "cosh_impl";
#endif
void cosh_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char sin_name[] = "sin_impl";
constexpr char sin_name[] = "sin_impl";
#endif
void sin_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char sinh_name[] = "sinh_impl";
constexpr char sinh_name[] = "sinh_impl";
#endif
void sinh_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char tan_name[] = "tan_impl";
constexpr char tan_name[] = "tan_impl";
#endif
void tan_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char tanh_name[] = "tanh_impl";
constexpr char tanh_name[] = "tanh_impl";
#endif
void tanh_kernel_cuda(TensorIteratorBase& iter) {

View File

@ -13,7 +13,7 @@
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char log_name[] = "log_kernel";
constexpr char log_name[] = "log_kernel";
#endif
void log_kernel_cuda(TensorIteratorBase& iter) {
@ -47,7 +47,7 @@ void log_kernel_cuda(TensorIteratorBase& iter) {
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char log10_name[] = "log10_kernel";
constexpr char log10_name[] = "log10_kernel";
void log10_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
@ -84,7 +84,7 @@ void log1p_kernel_cuda(TensorIteratorBase& iter) {
});
}
CONSTEXPR_EXCEPT_WIN_CUDA char log2_name[] = "log2_kernel";
constexpr char log2_name[] = "log2_kernel";
void log2_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {

View File

@ -34,7 +34,7 @@ void bitwise_not_kernel_cuda(TensorIteratorBase& iter) {
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char exp_name[] = "exp_kernel";
constexpr char exp_name[] = "exp_kernel";
void exp_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
@ -92,7 +92,7 @@ C10_HOST_DEVICE static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) {
return one / ::sqrt(v);
}
CONSTEXPR_EXCEPT_WIN_CUDA char rsqrt_name[] = "rsqrt_kernel";
constexpr char rsqrt_name[] = "rsqrt_kernel";
void rsqrt_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
@ -131,7 +131,7 @@ void rsqrt_kernel_cuda(TensorIteratorBase& iter) {
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char sqrt_name[] = "sqrt_kernel";
constexpr char sqrt_name[] = "sqrt_kernel";
void sqrt_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {

View File

@ -25,7 +25,7 @@ void logical_not_kernel_cuda(TensorIteratorBase& iter) {
}
// NB: Ignores the negative bit on tensors
CONSTEXPR_EXCEPT_WIN_CUDA char neg_name[] = "neg_kernel";
constexpr char neg_name[] = "neg_kernel";
void neg_kernel_cuda(TensorIteratorBase& iter) {
auto dtype = iter.dtype();
if (at::isComplexType(dtype)) {
@ -96,7 +96,7 @@ C10_HOST_DEVICE static inline c10::complex<T> sgn_wrapper(c10::complex<T> z) {
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char sgn_name[] = "sgn_kernel";
constexpr char sgn_name[] = "sgn_kernel";
void sgn_kernel_cuda(TensorIteratorBase& iter){
auto dtype = iter.dtype();
#if AT_USE_JITERATOR()

View File

@ -19,7 +19,7 @@
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char exp2_name[] = "exp2_kernel";
constexpr char exp2_name[] = "exp2_kernel";
void exp2_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
@ -41,7 +41,7 @@ void exp2_kernel_cuda(TensorIteratorBase& iter) {
#endif
}
CONSTEXPR_EXCEPT_WIN_CUDA char i0_name[] = "i0";
constexpr char i0_name[] = "i0";
void i0_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0_cuda", [&]() {
@ -63,7 +63,7 @@ void i0_kernel_cuda(TensorIteratorBase& iter) {
}
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char i0e_name[] = "calc_i0e";
constexpr char i0e_name[] = "calc_i0e";
void i0e_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() {
@ -84,7 +84,7 @@ void i0e_kernel_cuda(TensorIteratorBase& iter) {
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char i1_name[] = "i1";
constexpr char i1_name[] = "i1";
void i1_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i1_cuda", [&]() {
@ -102,7 +102,7 @@ void i1_kernel_cuda(TensorIteratorBase& iter) {
#endif // AT_USE_JITERATOR()
}
CONSTEXPR_EXCEPT_WIN_CUDA char i1e_name[] = "i1e";
constexpr char i1e_name[] = "i1e";
void i1e_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i1e_cuda", [&]() {
@ -120,7 +120,7 @@ void i1e_kernel_cuda(TensorIteratorBase& iter) {
#endif
}
CONSTEXPR_EXCEPT_WIN_CUDA char sigmoid_name[] = "sigmoid";
constexpr char sigmoid_name[] = "sigmoid";
void sigmoid_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
@ -159,7 +159,7 @@ void sigmoid_kernel_cuda(TensorIteratorBase& iter) {
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char sinc_name[] = "sinc";
constexpr char sinc_name[] = "sinc";
void sinc_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
@ -217,7 +217,7 @@ void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) {
});
}
CONSTEXPR_EXCEPT_WIN_CUDA char ndtri_name[] = "ndtri";
constexpr char ndtri_name[] = "ndtri";
void ndtri_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "ndtri_cuda", [&]() {
@ -234,7 +234,7 @@ void ndtri_kernel_cuda(TensorIteratorBase& iter) {
#endif
}
CONSTEXPR_EXCEPT_WIN_CUDA char log_ndtr_name[] = "log_ndtr";
constexpr char log_ndtr_name[] = "log_ndtr";
void log_ndtr_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "log_ndtr_cuda", [&]() {
@ -259,7 +259,7 @@ void erf_kernel_cuda(TensorIteratorBase& iter) {
});
}
CONSTEXPR_EXCEPT_WIN_CUDA char erfc_name[] = "erfc_kernel";
constexpr char erfc_name[] = "erfc_kernel";
void erfc_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "erfc_cuda", [&]() {
@ -278,7 +278,7 @@ void erfc_kernel_cuda(TensorIteratorBase& iter) {
#endif
}
CONSTEXPR_EXCEPT_WIN_CUDA char erfinv_name[] = "erfinv_kernel";
constexpr char erfinv_name[] = "erfinv_kernel";
void erfinv_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "erfinv_cuda", [&]() {
@ -297,7 +297,7 @@ void erfinv_kernel_cuda(TensorIteratorBase& iter) {
#endif
}
CONSTEXPR_EXCEPT_WIN_CUDA char erfcx_name[] = "erfcx";
constexpr char erfcx_name[] = "erfcx";
void erfcx_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "erfcx_cuda", [&]() {
@ -314,7 +314,7 @@ void erfcx_kernel_cuda(TensorIteratorBase& iter) {
#endif
}
CONSTEXPR_EXCEPT_WIN_CUDA char kaiser_window_name[] = "kaiser_window";
constexpr char kaiser_window_name[] = "kaiser_window";
void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length, double beta_){
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
@ -348,7 +348,7 @@ void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length,
#endif
}
CONSTEXPR_EXCEPT_WIN_CUDA char entr_name[] = "entr";
constexpr char entr_name[] = "entr";
void entr_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "entr_cuda", [&]() {

View File

@ -15,7 +15,7 @@ namespace {
* See note [3-Clause BSD License for the Cephes Math Library].
*/
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char zeta_name[] = "zeta";
constexpr char zeta_name[] = "zeta";
void zeta_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() {

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char airy_ai_name[] = "airy_ai_forward";
constexpr char airy_ai_name[] = "airy_ai_forward";
void airy_ai_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char bessel_j0_name[] = "bessel_j0_forward";
constexpr char bessel_j0_name[] = "bessel_j0_forward";
void bessel_j0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char bessel_j1_name[] = "bessel_j1_forward";
constexpr char bessel_j1_name[] = "bessel_j1_forward";
void bessel_j1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char bessel_y0_name[] = "bessel_y0_forward";
constexpr char bessel_y0_name[] = "bessel_y0_forward";
void bessel_y0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char bessel_y1_name[] = "bessel_y1_forward";
constexpr char bessel_y1_name[] = "bessel_y1_forward";
void bessel_y1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";
constexpr char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";
void chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_u_name[] = "chebyshev_polynomial_u_forward";
constexpr char chebyshev_polynomial_u_name[] = "chebyshev_polynomial_u_forward";
void chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_v_name[] = "chebyshev_polynomial_v_forward";
constexpr char chebyshev_polynomial_v_name[] = "chebyshev_polynomial_v_forward";
void chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_w_name[] = "chebyshev_polynomial_w_forward";
constexpr char chebyshev_polynomial_w_name[] = "chebyshev_polynomial_w_forward";
void chebyshev_polynomial_w_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char hermite_polynomial_h_name[] = "hermite_polynomial_h_forward";
constexpr char hermite_polynomial_h_name[] = "hermite_polynomial_h_forward";
void hermite_polynomial_h_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char hermite_polynomial_he_name[] = "hermite_polynomial_he_forward";
constexpr char hermite_polynomial_he_name[] = "hermite_polynomial_he_forward";
void hermite_polynomial_he_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char laguerre_polynomial_l_name[] = "laguerre_polynomial_l_forward";
constexpr char laguerre_polynomial_l_name[] = "laguerre_polynomial_l_forward";
void laguerre_polynomial_l_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_i0_name[] = "modified_bessel_i0_forward";
constexpr char modified_bessel_i0_name[] = "modified_bessel_i0_forward";
void modified_bessel_i0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_i1_name[] = "modified_bessel_i1_forward";
constexpr char modified_bessel_i1_name[] = "modified_bessel_i1_forward";
void modified_bessel_i1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_k0_name[] = "modified_bessel_k0_forward";
constexpr char modified_bessel_k0_name[] = "modified_bessel_k0_forward";
void modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_k1_name[] = "modified_bessel_k1_forward";
constexpr char modified_bessel_k1_name[] = "modified_bessel_k1_forward";
void modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward";
constexpr char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward";
void scaled_modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char scaled_modified_bessel_k1_name[] = "scaled_modified_bessel_k1_forward";
constexpr char scaled_modified_bessel_k1_name[] = "scaled_modified_bessel_k1_forward";
void scaled_modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_t_name[] = "shifted_chebyshev_polynomial_t_forward";
constexpr char shifted_chebyshev_polynomial_t_name[] = "shifted_chebyshev_polynomial_t_forward";
void shifted_chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_u_name[] = "shifted_chebyshev_polynomial_u_forward";
constexpr char shifted_chebyshev_polynomial_u_name[] = "shifted_chebyshev_polynomial_u_forward";
void shifted_chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_v_name[] = "shifted_chebyshev_polynomial_v_forward";
constexpr char shifted_chebyshev_polynomial_v_name[] = "shifted_chebyshev_polynomial_v_forward";
void shifted_chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -10,7 +10,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_w_name[] = "shifted_chebyshev_polynomial_w_forward";
constexpr char shifted_chebyshev_polynomial_w_name[] = "shifted_chebyshev_polynomial_w_forward";
void shifted_chebyshev_polynomial_w_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -20,7 +20,7 @@
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char spherical_bessel_j0_name[] = "spherical_bessel_j0_forward";
constexpr char spherical_bessel_j0_name[] = "spherical_bessel_j0_forward";
void spherical_bessel_j0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()

View File

@ -445,66 +445,14 @@ __host__ __device__
#define C10_ALWAYS_INLINE_UNLESS_MOBILE C10_ALWAYS_INLINE
#endif
#if defined(__CUDA_ARCH__)
#if defined(_MSC_VER) && defined(__CUDACC__)
#define CONSTEXPR_EXCEPT_WIN_CUDA const
#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__
// Note [static constexpr char* members for windows NVCC]
// The Windows NVCC compiler doesn't handle static constexpr class members,
// although it's fixed in a later version.
// (see
// https://developercommunity.visualstudio.com/t/intellisense-error-c11-static-constexpr-member-ini/245425)
//
// If we want to ensure that our field is static under all builds, then we need
// to work around it specifically for windows NVCC by making it (a) const, (b)
// defined outside of the class definition We need to define it outside of the
// class definition because of the C++ standard; char* is not an integral type
// (see
// https://stackoverflow.com/questions/24278473/intellisense-a-member-of-type-const-char-const-cannot-have-an-in-class-in)
//
// So instead of this:
// struct Foo {
// static constexpr const char* name = "foo";
// }
// In Windows NVCC, we end up with this:
// struct Foo {
// static const char* name;
// }
// const char* Foo::name = "foo";
//
// This gives us a small perf hit for any code that wants to access these field
// members, but right now it isn't used in any perf-critical code paths.
#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \
static const char* field;
#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \
const char* cls::field = val;
#else
#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr
#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__
#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \
static constexpr const char* field = val;
#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val)
#endif
#else
#if defined(_MSC_VER) && defined(__CUDACC__)
#define CONSTEXPR_EXCEPT_WIN_CUDA const
#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA
#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \
static const char* field;
#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \
const char* cls::field = val;
#else
#if !defined(FBCODE_CAFFE2) && !defined(C10_NODEPRECATED)
#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr
#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA constexpr
#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \
static constexpr const char* field = val;
static constexpr const char field[] = val;
#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val)
#endif
#endif
#endif // !defined(FBCODE_CAFFE2) && !defined(C10_NODEPRECATED)
#ifndef HAS_DEMANGLE
#if defined(__ANDROID__) || defined(_WIN32) || defined(__EMSCRIPTEN__)
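
Spelling out the workaround that the removed note describes as two compilable fragments (the struct names Foo and Bar are illustrative, not from the codebase):

// What STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA / STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA
// expanded to on Windows NVCC: a non-constexpr member defined out of line.
struct Foo {
  static const char* name;
};
const char* Foo::name = "foo";

// The plain in-class constexpr member the note contrasts it with,
// which this change now relies on everywhere:
struct Bar {
  static constexpr const char* name = "foo";
};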

View File

@ -76,13 +76,13 @@ class ArrayRef final {
constexpr ArrayRef(const T& OneElt) : Data(&OneElt), Length(1) {}
/// Construct an ArrayRef from a pointer and length.
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef(const T* data, size_t length)
constexpr ArrayRef(const T* data, size_t length)
: Data(data), Length(length) {
debugCheckNullptrInvariant();
}
/// Construct an ArrayRef from a range.
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef(const T* begin, const T* end)
constexpr ArrayRef(const T* begin, const T* end)
: Data(begin), Length(end - begin) {
debugCheckNullptrInvariant();
}
@ -182,14 +182,14 @@ class ArrayRef final {
}
/// front - Get the first element.
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& front() const {
constexpr const T& front() const {
TORCH_CHECK(
!empty(), "ArrayRef: attempted to access front() of empty list");
return Data[0];
}
/// back - Get the last element.
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& back() const {
constexpr const T& back() const {
TORCH_CHECK(!empty(), "ArrayRef: attempted to access back() of empty list");
return Data[Length - 1];
}
@ -200,8 +200,7 @@ class ArrayRef final {
}
/// slice(n, m) - Take M elements of the array starting at element N
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef<T> slice(size_t N, size_t M)
const {
constexpr ArrayRef<T> slice(size_t N, size_t M) const {
TORCH_CHECK(
N + M <= size(),
"ArrayRef: invalid slice, N = ",
@ -214,7 +213,7 @@ class ArrayRef final {
}
/// slice(n) - Chop off the first N elements of the array.
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef<T> slice(size_t N) const {
constexpr ArrayRef<T> slice(size_t N) const {
TORCH_CHECK(
N <= size(), "ArrayRef: invalid slice, N = ", N, "; size = ", size());
return slice(N, size() - N);
@ -228,7 +227,7 @@ class ArrayRef final {
}
/// Vector compatibility
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& at(size_t Index) const {
constexpr const T& at(size_t Index) const {
TORCH_CHECK(
Index < Length,
"ArrayRef: invalid index Index = ",

View File

@ -98,8 +98,10 @@ constexpr uint64_t crc64_table[] = {
0x29b7d047efec8728,
};
inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA uint64_t
crc64impl(uint64_t accumulator, const char* data, size_t size) {
inline constexpr uint64_t crc64impl(
uint64_t accumulator,
const char* data,
size_t size) {
for (size_t i = 0; i < size; ++i) {
accumulator =
crc64_table[(accumulator ^ data[i]) & 0xFF] ^ (accumulator >> 8);
@ -116,12 +118,11 @@ struct crc64_t final : IdWrapper<crc64_t, uint64_t> {
};
// CRC64 with Jones coefficients and an init value of 0.
inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t
crc64(const char* str, size_t size) {
inline constexpr crc64_t crc64(const char* str, size_t size) {
return crc64_t{detail::crc64impl(0, str, size)};
}
inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t crc64(c10::string_view str) {
inline constexpr crc64_t crc64(c10::string_view str) {
return crc64(str.data(), str.size());
}
} // namespace c10::util
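
One practical effect in this header: with the macro gone, crc64 is declared plain constexpr on every platform, so it can in principle participate in constant evaluation. A minimal sketch, assuming the crc64_t constructors involved are likewise constexpr (the usage below is hypothetical, not taken from the diff):

#include <c10/util/hash.h>

// Hash a schema string at compile time.
constexpr c10::util::crc64_t kAbsSchemaHash = c10::util::crc64("aten::abs", 9);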

View File

@ -71,7 +71,7 @@ class C10_API TypeIdentifier final
* is generated during run-time. Do NOT serialize the id for storage.
*/
template <typename T>
static C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA TypeIdentifier Get() noexcept {
static constexpr TypeIdentifier Get() noexcept {
return TypeIdentifier(c10::util::get_type_index<T>());
}
@ -425,7 +425,7 @@ class C10_API TypeMeta final {
// Below are static functions that can be called by passing a specific type.
template <class T>
static C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA TypeIdentifier Id() noexcept {
static constexpr TypeIdentifier Id() noexcept {
return TypeIdentifier::Get<T>();
}

View File

@ -1,96 +0,0 @@
"""
CONSTEXPR: Ensures users don't use vanilla constexpr since it causes issues
"""
from __future__ import annotations
import argparse
import json
import logging
import sys
from enum import Enum
from typing import NamedTuple
CONSTEXPR = "constexpr char"
CONSTEXPR_MACRO = "CONSTEXPR_EXCEPT_WIN_CUDA char"
LINTER_CODE = "CONSTEXPR"
class LintSeverity(str, Enum):
ERROR = "error"
class LintMessage(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def check_file(filename: str) -> LintMessage | None:
logging.debug("Checking file %s", filename)
with open(filename) as f:
lines = f.readlines()
for idx, line in enumerate(lines):
if CONSTEXPR in line:
original = "".join(lines)
replacement = original.replace(CONSTEXPR, CONSTEXPR_MACRO)
logging.debug("replacement: %s", replacement)
return LintMessage(
path=filename,
line=idx,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="Vanilla constexpr used, prefer macros",
original=original,
replacement=replacement,
description="Vanilla constexpr used, prefer macros run `lintrunner --take CONSTEXPR -a` to apply changes.",
)
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="CONSTEXPR linter",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
lint_messages = []
for filename in args.filenames:
lint_message = check_file(filename)
if lint_message is not None:
lint_messages.append(lint_message)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)

View File

@ -593,7 +593,7 @@ struct TORCH_API ModulePolicy {
}
// are we going to return everything? If so, we can optimize the calculate
// of the size of the list.
static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
static constexpr bool all_slots = false;
};
struct TORCH_API ParameterPolicy {
@ -606,7 +606,7 @@ struct TORCH_API ParameterPolicy {
static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
return typ->is_parameter(i) && v.isTensor();
}
static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
static constexpr bool all_slots = false;
};
struct TORCH_API BufferPolicy {
@ -620,7 +620,7 @@ struct TORCH_API BufferPolicy {
return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) &&
typ->is_buffer(i);
}
static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
static constexpr bool all_slots = false;
};
struct TORCH_API AttributePolicy {
@ -633,7 +633,7 @@ struct TORCH_API AttributePolicy {
static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
return true;
}
static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = true;
static constexpr bool all_slots = true;
};
// take a Policy object, and make a version of it that returns the slot.

View File

@ -216,7 +216,7 @@ class TORCH_API Pickler {
// the left of a '::', its type cannot be deduced by the compiler so one must
// explicitly instantiate the template, i.e. push<int>(int) works, push(int)
// does not)
static CONSTEXPR_EXCEPT_WIN_CUDA size_t kBufferSize = 256;
static constexpr size_t kBufferSize = 256;
template <typename T>
void push(std::common_type_t<T> value) {
const char* begin = reinterpret_cast<const char*>(&value);

View File

@ -600,19 +600,15 @@ struct TORCH_API {name} {{
using schema = {sig.type()};
using ptr_schema = schema*;
// See Note [static constexpr char* members for windows NVCC]
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::{f.func.name.name}")
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "{f.func.name.overload_name}")
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, {cpp_string(str(f.func))})
static constexpr const char* name = "aten::{f.func.name.name}";
static constexpr const char* overload_name = "{f.func.name.overload_name}";
static constexpr const char* schema_str = {cpp_string(str(f.func))};
static {sig.defn(name="call", is_redispatching_fn=False)};
static {sig.defn(name="redispatch", is_redispatching_fn=True)};
}};"""
elif self.target is Target.DEFINITION:
defns = f"""
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, name, "aten::{f.func.name.name}")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, overload_name, "{f.func.name.overload_name}")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, schema_str, {cpp_string(str(f.func))})
// aten::{f.func}
static C10_NOINLINE c10::TypedOperatorHandle<{name}::schema> create_{name}_typed_handle() {{
return c10::Dispatcher::singleton()