Skip compiledWithCuDNN() call for mobile to avoid segfault (#71775)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/71775

Mobile is running into segfaults at the `compiledWithCuDNN()` call, as described in T110194934. This fix works around it with an #ifdef, following the approach done [here](d32b7d9585/aten/src/ATen/native/Convolution.cpp (L1076-L1088)). TBD how to fix the underlying cause.

Test Plan: Imported from OSS

Reviewed By: anjali411

Differential Revision: D33778888

Pulled By: jbschlosser

fbshipit-source-id: 2a22b2eaa858ee6adf5b3c25a1c470c6aebc3f87
(cherry picked from commit e90a6bb402)
This commit is contained in:
parent
0891c908bb
commit
e04ade92ae
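As a rough illustration of the workaround described in the summary, the idea reduces to guarding the cuDNN capability probe behind the C10_MOBILE preprocessor macro. The sketch below is a simplified, standalone version of that pattern; the free function can_use_cudnn_backend and its surrounding structure are illustrative only, not the actual Convolution.cpp code, and it assumes C10_MOBILE is defined on mobile builds as in the diff below.

// Minimal sketch of the guard pattern, assuming C10_MOBILE is defined on
// mobile builds; simplified for illustration, not the exact PyTorch code.
#include <ATen/detail/CUDAHooksInterface.h>

bool can_use_cudnn_backend() {
#if !defined(C10_MOBILE)
  // Non-mobile builds: safe to ask the CUDA hooks whether this build of
  // ATen was compiled with cuDNN support.
  return at::detail::getCUDAHooks().compiledWithCuDNN();
#else
  // Mobile builds: cuDNN is never available there, and the
  // compiledWithCuDNN() call has been observed to segfault, so return
  // false without ever touching the hooks.
  return false;
#endif
}

The non-mobile code path stays unchanged, while mobile builds short-circuit to false at compile time.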
--- a/aten/src/ATen/native/Convolution.cpp
+++ b/aten/src/ATen/native/Convolution.cpp
@@ -176,6 +176,11 @@ auto ConvParams::needs_64bit_indexing_no_split(const at::Tensor& input, const at::Tensor& weight) const -> bool {
 }
 
 auto ConvParams::use_cudnn(const at::Tensor& input, const at::Tensor& weight) const -> bool {
+
+// Note [Mobile check segfaults]
+// cudnn and miopen are guaranteed not to be on mobile, and T102591915 / T110194934 suggest
+// that maybe the compiledWithCuDNN() check sometimes segfaults (though I can't imagine how)
+#if !defined(C10_MOBILE)
   if (needs_64bit_indexing_no_split(input, weight)) {
     return false;
   }
@@ -199,6 +204,9 @@ auto ConvParams::use_cudnn(const at::Tensor& input, const at::Tensor& weight) const -> bool {
     }
   }
   return !is_output_padding_big();
+#else
+  return false;
+#endif
 }
 
 auto ConvParams::use_miopen(const at::Tensor& input, const at::Tensor& weight, bool bias_defined) const -> bool {
@@ -1074,9 +1082,7 @@ static inline at::MemoryFormat determine_backend_memory_format(
   at::MemoryFormat backend_memory_format = at::MemoryFormat::Contiguous;
   auto k = weight.ndimension();
 #if !defined(C10_MOBILE)
-  // cudnn and miopen are guaranteed not to be on mobile, and T102591915
-  // suggests that maybe the cudnn condition sometimes segfaults (though
-  // I can't imagine how)
+  // See Note [Mobile check segfaults]
   if (detail::getCUDAHooks().compiledWithCuDNN()) {
     backend_memory_format = cudnn_conv_suggest_memory_format(input, weight);
   }