Remove deprecated cuDNN convolution ops (#71128)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/71128

Test Plan: Imported from OSS

Reviewed By: anjali411

Differential Revision: D33517677

Pulled By: jbschlosser

fbshipit-source-id: 1690fd38a38ee7cf16865209280a9c457c5f70ff
parent 93b2399c6c
commit 523d448968
@@ -318,10 +318,6 @@ TORCH_LIBRARY_IMPL(aten, Autocast, m) {
   KERNEL(ADD_NS(conv_transpose2d), "conv_transpose2d.input", Tensor (const Tensor &, const Tensor &, const c10::optional<Tensor>&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, IntArrayRef), lower_precision_fp)
   KERNEL(ADD_NS(conv_transpose3d), "conv_transpose3d.input", Tensor (const Tensor &, const Tensor &, const c10::optional<Tensor>&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, IntArrayRef), lower_precision_fp)
   KERNEL(ADD_NS(convolution), "convolution", Tensor (const Tensor &, const Tensor &, const c10::optional<Tensor>&, IntArrayRef, IntArrayRef, IntArrayRef, bool, IntArrayRef, int64_t), lower_precision_fp)
-  KERNEL(ADD_NS(cudnn_convolution), "cudnn_convolution.deprecated", Tensor (const Tensor &, const Tensor &, const c10::optional<Tensor>&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool), lower_precision_fp)
-  KERNEL(ADD_NS(cudnn_convolution_transpose), "cudnn_convolution_transpose.deprecated", Tensor (const Tensor &, const Tensor &, const c10::optional<Tensor>&, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool), lower_precision_fp)
-  KERNEL(ADD_NS(cudnn_convolution), "cudnn_convolution.deprecated2", Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool), lower_precision_fp)
-  KERNEL(ADD_NS(cudnn_convolution_transpose), "cudnn_convolution_transpose.deprecated2", Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool), lower_precision_fp)
   KERNEL(ADD_NS(cudnn_convolution), "cudnn_convolution", Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool, bool), lower_precision_fp)
   KERNEL(ADD_NS(cudnn_convolution_transpose), "cudnn_convolution_transpose", Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool, bool), lower_precision_fp)
   KERNEL(ADD_NS(prelu), "prelu", Tensor (const Tensor &, const Tensor &), lower_precision_fp)
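For context (not part of the diff): the KERNEL(..., lower_precision_fp) registrations above mean that, under autocast on CUDA, float32 inputs to these convolutions are cast down to the autocast dtype before the kernel runs. A minimal Python sketch of that behaviour, assuming a CUDA build with cuDNN (shapes are illustrative only):

    import torch
    import torch.nn.functional as F

    if torch.cuda.is_available():
        x = torch.randn(8, 3, 32, 32, device="cuda", dtype=torch.float32)
        w = torch.randn(16, 3, 3, 3, device="cuda", dtype=torch.float32)
        with torch.autocast(device_type="cuda"):
            y = F.conv2d(x, w, padding=1)  # dispatches through a lower_precision_fp kernel
        print(y.dtype)  # torch.float16 under the default CUDA autocast dtype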
@@ -119,61 +119,4 @@ Tensor cudnn_convolution_add_relu(
 #endif // AT_CUDNN_ENABLED
 
-// ---------------------------------------------------------------------
-//
-// Deprecated operators
-//
-// ---------------------------------------------------------------------
-
-// TODO (@zasdfgbnm): this is here only for compatibility, remove this in the future
-Tensor cudnn_convolution_deprecated(
-    const Tensor& input, const Tensor& weight, const c10::optional<Tensor>& bias_opt /* optional */,
-    IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
-    int64_t groups, bool benchmark, bool deterministic) {
-  // See [Note: hacky wrapper removal for optional tensor]
-  c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
-  const Tensor& bias = *bias_maybe_owned;
-
-  auto output = at::cudnn_convolution(input, weight, padding, stride, dilation, groups, benchmark, deterministic);
-  if (bias.defined()) {
-    output = output + reshape_bias(input.dim(), bias);
-  }
-  return output;
-}
-
-// TODO (@zasdfgbnm): this is here only for compatibility, remove this in the future
-Tensor cudnn_convolution_deprecated2(
-    const Tensor& input_t, const Tensor& weight_t,
-    IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
-    int64_t groups, bool benchmark, bool deterministic)
-{
-  return at::cudnn_convolution(input_t, weight_t, padding, stride, dilation, groups, benchmark, deterministic, at::globalContext().allowTF32CuDNN());
-}
-
-// TODO (@zasdfgbnm): this is here only for compatibility, remove this in the future
-Tensor cudnn_convolution_transpose_deprecated(
-    const Tensor& input, const Tensor& weight, const c10::optional<Tensor>& bias_opt /* optional */,
-    IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation,
-    int64_t groups, bool benchmark, bool deterministic)
-{
-  // See [Note: hacky wrapper removal for optional tensor]
-  c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
-  const Tensor& bias = *bias_maybe_owned;
-
-  auto output = at::cudnn_convolution_transpose(input, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
-  if (bias.defined()) {
-    output = output + reshape_bias(input.dim(), bias);
-  }
-  return output;
-}
-
-// TODO (@zasdfgbnm): this is here only for compatibility, remove this in the future
-Tensor cudnn_convolution_transpose_deprecated2(
-    const Tensor& input_t, const Tensor& weight_t,
-    IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation,
-    int64_t groups, bool benchmark, bool deterministic)
-{
-  return at::cudnn_convolution_transpose(input_t, weight_t, padding, output_padding, stride, dilation, groups, benchmark, deterministic, at::globalContext().allowTF32CuDNN());
-}
-
 }}
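The removed wrappers above only re-expressed the deprecated schemas in terms of the current ops (cuDNN convolution followed by an explicit bias add). A hedged migration sketch in Python, for user code that previously reached for the bias-taking deprecated overload: the regular functional API covers the same case and dispatches to cuDNN on CUDA.

    import torch
    import torch.nn.functional as F

    if torch.cuda.is_available():
        x = torch.randn(4, 8, 16, 16, device="cuda")
        w = torch.randn(8, 8, 3, 3, device="cuda")
        b = torch.randn(8, device="cuda")
        # Covers what cudnn_convolution.deprecated(input, weight, bias, ...) used to do
        y = F.conv2d(x, w, b, stride=1, padding=0, dilation=1, groups=1)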
@@ -10,8 +10,8 @@
 //
 // ConvPlaceholders.cpp contains placeholder implementation of cudnn
 // convolution when cudnn is not enabled. These operators only raises
-// errors, and do no real computation. This file also contains deprecated
-// operators. These operators are implemented using currnet operators.
+// errors, and do no real computation. These operators are implemented
+// using currnet operators.
 //
 // cuDNN v7 and v8 have different API. ConvShared.{cpp, h} contains
 // code shared by v7 and v8. Conv_v7.cpp contains implementation of
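As the updated comment says, the placeholder implementations exist only to raise errors when cuDNN is not enabled. A short, hedged Python sketch of the guard callers typically use before relying on cuDNN-backed ops (illustrative, not part of this change):

    import torch

    def cudnn_conv_available() -> bool:
        # True only when a CUDA device is present and the build ships cuDNN,
        # i.e. when the real kernels (not the error-raising placeholders) run.
        return torch.cuda.is_available() and torch.backends.cudnn.is_available()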
@@ -1445,26 +1445,10 @@
   dispatch:
     CUDA: cudnn_batch_norm_backward
 
-- func: cudnn_convolution.deprecated(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
-  dispatch:
-    CUDA: cudnn_convolution_deprecated
-
-- func: cudnn_convolution.deprecated2(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
-  dispatch:
-    CUDA: cudnn_convolution_deprecated2
-
 - func: cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
   dispatch:
     CUDA: cudnn_convolution
 
-- func: cudnn_convolution_transpose.deprecated(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
-  dispatch:
-    CUDA: cudnn_convolution_transpose_deprecated
-
-- func: cudnn_convolution_transpose.deprecated2(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
-  dispatch:
-    CUDA: cudnn_convolution_transpose_deprecated2
-
 - func: cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
   dispatch:
     CUDA: cudnn_convolution_transpose
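After this change only the allow_tf32-taking schemas remain registered. A hedged sketch of calling the surviving overload directly through torch.ops, with the argument order taken from the schema kept above (most code should prefer torch.nn.functional.conv2d; the shapes here are illustrative):

    import torch

    if torch.cuda.is_available():
        x = torch.randn(2, 4, 8, 8, device="cuda")
        w = torch.randn(6, 4, 3, 3, device="cuda")
        # padding, stride, dilation, groups, benchmark, deterministic, allow_tf32
        y = torch.ops.aten.cudnn_convolution(x, w, [0, 0], [1, 1], [1, 1], 1, False, False, True)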
@@ -54,6 +54,10 @@ ALLOW_LIST = [
     ("aten::randperm", datetime.date(9999, 1, 1)),
     ("aten::_conv_depthwise2d_backward", datetime.date(2022, 1, 31)),
     ("aten::conv_depthwise3d_backward", datetime.date(2022, 1, 31)),
+    ("aten::cudnn_convolution.deprecated", datetime.date(2022, 1, 31)),
+    ("aten::cudnn_convolution.deprecated2", datetime.date(2022, 1, 31)),
+    ("aten::cudnn_convolution_transpose.deprecated", datetime.date(2022, 1, 31)),
+    ("aten::cudnn_convolution_transpose.deprecated2", datetime.date(2022, 1, 31)),
     ("aten::cudnn_convolution_backward", datetime.date(2022, 1, 31)),
     ("aten::cudnn_convolution_backward_input", datetime.date(2022, 1, 31)),
     ("aten::cudnn_convolution_backward_weight", datetime.date(2022, 1, 31)),
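The entries added above whitelist the removed schemas in the forward/backward-compatibility check until the listed expiry date. A simplified, hypothetical sketch of how such an allow list is consulted (the helper name is illustrative, not the real script's API):

    import datetime

    ALLOW_LIST = [
        ("aten::cudnn_convolution.deprecated", datetime.date(2022, 1, 31)),
        ("aten::cudnn_convolution_transpose.deprecated", datetime.date(2022, 1, 31)),
    ]

    def schema_removal_allowed(op_name: str, today: datetime.date) -> bool:
        # A missing schema is tolerated if a matching allow-list entry has not yet expired.
        return any(op_name.startswith(name) and today < expiry for name, expiry in ALLOW_LIST)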
@@ -93,14 +93,6 @@ class AutocastTestLists(object):
             ("conv_transpose2d", conv_args_fp32[1]),
             ("conv_transpose3d", conv_args_fp32[2]),
             ("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)),
-            # deprecated cudnn_convolutions with bias
-            ("cudnn_convolution", conv_args_fp32[1] + bias_fp32 + ((0, 0), (1, 1), (1, 1), 1, False, True), TEST_WITH_ROCM),
-            ("cudnn_convolution_transpose", conv_args_fp32[1] + bias_fp32 + ((0, 0), (0, 0), (1, 1),
-                                                                             (1, 1), 1, False, True), TEST_WITH_ROCM),
-            # deprecated cudnn_convolutions with no allow_tf32 flag
-            ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True), TEST_WITH_ROCM),
-            ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1), (1, 1), 1, False, True), TEST_WITH_ROCM),
-            # the current cudnn_convolutions
             ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM),
             ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1),
                                                                  (1, 1), 1, False, True, True), TEST_WITH_ROCM),
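Each remaining entry above is an (op name, fp32 args[, skip-on-ROCm flag]) tuple. A hedged sketch (not the actual test harness) of how such an entry is typically exercised, checking that the op promotes its result to the lower-precision autocast dtype on CUDA:

    import torch

    def check_autocast_fp16(op_name, args):
        op = getattr(torch, op_name)  # assumes the op is exposed on the torch namespace
        with torch.autocast(device_type="cuda"):
            out = op(*args)
        assert out.dtype == torch.float16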