diff --git a/aten/src/ATen/native/Convolution.cpp b/aten/src/ATen/native/Convolution.cpp
index b933f066861..1b7edbe2e10 100644
--- a/aten/src/ATen/native/Convolution.cpp
+++ b/aten/src/ATen/native/Convolution.cpp
@@ -176,6 +176,11 @@ auto ConvParams::needs_64bit_indexing_no_split(const at::Tensor& input, const at
 }
 
 auto ConvParams::use_cudnn(const at::Tensor& input, const at::Tensor& weight) const -> bool {
+
+// Note [Mobile check segfaults]
+// cudnn and miopen are guaranteed not to be on mobile, and T102591915 / T110194934 suggest
+// that maybe the compiledWithCuDNN() check sometimes segfaults (though I can't imagine how)
+#if !defined(C10_MOBILE)
   if (needs_64bit_indexing_no_split(input, weight)) {
     return false;
   }
@@ -199,6 +204,9 @@ auto ConvParams::use_cudnn(const at::Tensor& input, const at::Tensor& weight) co
     }
   }
   return !is_output_padding_big();
+#else
+  return false;
+#endif
 }
 
 auto ConvParams::use_miopen(const at::Tensor& input, const at::Tensor& weight, bool bias_defined) const -> bool {
@@ -1074,9 +1082,7 @@ static inline at::MemoryFormat determine_backend_memory_format(
   at::MemoryFormat backend_memory_format = at::MemoryFormat::Contiguous;
   auto k = weight.ndimension();
 #if !defined(C10_MOBILE)
-  // cudnn and miopen are guaranteed not to be on mobile, and T102591915
-  // suggests that maybe the cudnn condition sometimes segfaults (though
-  // I can't imagine how)
+  // See Note [Mobile check segfaults]
   if (detail::getCUDAHooks().compiledWithCuDNN()) {
     backend_memory_format = cudnn_conv_suggest_memory_format(input, weight);
   }
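
For illustration only, here is a minimal standalone sketch of the compile-time guard pattern this patch applies to use_cudnn(): on mobile builds the cuDNN eligibility logic is compiled out entirely and the function returns false without touching any CUDA hooks. The helper use_cudnn_demo and its parameters are hypothetical, not PyTorch API.

// Sketch of the #if !defined(C10_MOBILE) guard pattern (not part of the patch).
#include <iostream>

// Define C10_MOBILE (or pass -DC10_MOBILE) to simulate a mobile build.
// #define C10_MOBILE

bool use_cudnn_demo(bool cudnn_available, bool output_padding_big) {
#if !defined(C10_MOBILE)
  // Server/desktop build: run the eligibility checks as usual.
  if (!cudnn_available) {
    return false;
  }
  return !output_padding_big;
#else
  // Mobile build: cuDNN is guaranteed absent, so never run the check at all.
  return false;
#endif
}

int main() {
  std::cout << std::boolalpha
            << use_cudnn_demo(/*cudnn_available=*/true, /*output_padding_big=*/false)
            << '\n';  // "true" on non-mobile builds, "false" when C10_MOBILE is defined
  return 0;
}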