From a8ca340ad6faa8ebb51fc364b891985843b5fe14 Mon Sep 17 00:00:00 2001
From: Hong Xu
Date: Tue, 17 Mar 2020 08:55:17 -0700
Subject: [PATCH] Remove all uses of AT_CHECK and replace them with TORCH_CHECK (#34846)

Summary:
AT_CHECK has been deprecated and provides no more features than TORCH_CHECK

Pull Request resolved: https://github.com/pytorch/pytorch/pull/34846

Differential Revision: D20481339

Pulled By: mrshenli

fbshipit-source-id: 1777e769a069a78e03118270294e5e273d516ca7
---
 aten/src/ATen/native/miopen/RNN_miopen.cpp | 44 +++++++++++-----------
 c10/util/Exception.h                       | 12 ------
 torch/csrc/utils/tensor_new.cpp            |  2 +-
 torch/lib/c10d/ProcessGroupGloo.cpp        |  2 +-
 torch/lib/c10d/ProcessGroupNCCL.cpp        |  2 +-
 5 files changed, 25 insertions(+), 37 deletions(-)

diff --git a/aten/src/ATen/native/miopen/RNN_miopen.cpp b/aten/src/ATen/native/miopen/RNN_miopen.cpp
index 08c1e9e9511..7d0bcf98258 100644
--- a/aten/src/ATen/native/miopen/RNN_miopen.cpp
+++ b/aten/src/ATen/native/miopen/RNN_miopen.cpp
@@ -447,7 +447,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> miopen_rnn(
     fn.tensors.set(input.sizes(), fn_batch_sizes, batch_first);
 
     if (fn.rnn.rnn_mode != miopenLSTM) {
-        AT_CHECK(!cx.defined(), "miopen_rnn: illegal defined cx for non-LSTM RNN.");
+        TORCH_CHECK(!cx.defined(), "miopen_rnn: illegal defined cx for non-LSTM RNN.");
     }
 
     auto is_input_packed = fn.tensors.batch_sizes.size() != 0;
@@ -458,8 +458,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> miopen_rnn(
     auto hidden_size = _hidden_size(fn.rnn, fn.tensors);
     auto output_size = _output_size(fn.rnn, fn.tensors);
 
-    AT_CHECK(hx.is_contiguous(), "miopen_rnn : hx is not contiguous.");
-    AT_CHECK(!cx.defined() || cx.is_contiguous(), "miopen_rnn : cx is not contiguous.");
+    TORCH_CHECK(hx.is_contiguous(), "miopen_rnn : hx is not contiguous.");
+    TORCH_CHECK(!cx.defined() || cx.is_contiguous(), "miopen_rnn : cx is not contiguous.");
 
     auto x = input.contiguous();
     auto output = at::empty(output_size, input.options());
@@ -493,7 +493,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> miopen_rnn(
     _copyParams_and_permute(MatrixRef<Tensor>{weight, static_cast<size_t>(weight_stride0)},
             MatrixRef<Tensor>{params, params_stride0}, fn_mode);
 
-    AT_CHECK(!cx.defined() || cx.sizes().equals(hidden_size), "Expected cell size ", IntArrayRef{hidden_size}, ", got", cx.sizes());
+    TORCH_CHECK(!cx.defined() || cx.sizes().equals(hidden_size), "Expected cell size ", IntArrayRef{hidden_size}, ", got", cx.sizes());
 
     size_t workspace_size;
     auto x_descs_arr = descs.get_x_descs();
@@ -563,7 +563,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> miopen_rnn_backward_input(
     auto handle = getMiopenHandle();
 
     if(fn.rnn.rnn_mode != miopenLSTM) {
-        AT_CHECK(!cx.defined(), "rnn: illegal defined cx for non-LSTM RNN");
+        TORCH_CHECK(!cx.defined(), "rnn: illegal defined cx for non-LSTM RNN");
     }
 
     auto is_input_packed = fn_batch_sizes.size() != 0;
@@ -577,8 +577,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> miopen_rnn_backward_input(
     auto hidden_size = _hidden_size(fn.rnn, fn.tensors);
     auto output_size = _output_size(fn.rnn, fn.tensors);
 
-    AT_CHECK(hx.is_contiguous(), "rnn: hx is not contiguous");
-    AT_CHECK(!cx.defined() || cx.is_contiguous(), "rnn: cx is not contiguous");
+    TORCH_CHECK(hx.is_contiguous(), "rnn: hx is not contiguous");
+    TORCH_CHECK(!cx.defined() || cx.is_contiguous(), "rnn: cx is not contiguous");
 
     auto x = input.contiguous();
     auto dy = grad_output.contiguous();
@@ -591,23 +591,23 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> miopen_rnn_backward_input(
     AT_ASSERTM(cx.defined() || !output_mask[2], "illegally required grad of cx for non-LSTM RNN");
     auto dcx = cx.defined() ? at::empty(hidden_size, cx.options()) : Tensor();
 
-    AT_CHECK(fn_train, "miopen RNN backward can only be called in training mode");
+    TORCH_CHECK(fn_train, "miopen RNN backward can only be called in training mode");
 
-    AT_CHECK(input.sizes().equals(input_size),
+    TORCH_CHECK(input.sizes().equals(input_size),
         "Expected input size ", IntArrayRef{input_size}, ", got ", input.sizes());
-    AT_CHECK(output.sizes().equals(output_size),
+    TORCH_CHECK(output.sizes().equals(output_size),
         "Expected output size ", IntArrayRef{output_size}, ", got ", output.sizes());
 
-    AT_CHECK(!hx.defined() || hx.sizes().equals(hidden_size),
+    TORCH_CHECK(!hx.defined() || hx.sizes().equals(hidden_size),
         "Expected hidden size ", IntArrayRef{hidden_size}, ", got ", hx.sizes());
-    AT_CHECK(!cx.defined() || cx.sizes().equals(hidden_size),
+    TORCH_CHECK(!cx.defined() || cx.sizes().equals(hidden_size),
         "Expected cell size ", IntArrayRef{hidden_size}, ", got ", cx.sizes());
-    AT_CHECK(!dhy.defined() || dhy.sizes().equals(hidden_size),
+    TORCH_CHECK(!dhy.defined() || dhy.sizes().equals(hidden_size),
         "Expected d_hidden size ", IntArrayRef{hidden_size}, ", got ", dhy.sizes());
-    AT_CHECK(!dcy.defined() || dcy.sizes().equals(hidden_size),
+    TORCH_CHECK(!dcy.defined() || dcy.sizes().equals(hidden_size),
         "Expected d_cell size ", IntArrayRef{hidden_size}, ", got ", dcy.sizes());
 
-    AT_CHECK(dhy.is_cuda() && dy.is_cuda() && (!dcy.defined() || dcy.is_cuda()),
+    TORCH_CHECK(dhy.is_cuda() && dy.is_cuda() && (!dcy.defined() || dcy.is_cuda()),
         "Gradients aren't HIP tensors");
 
     miopenRNNAlgo_t algo = miopenRNNdefault;
@@ -679,7 +679,7 @@ std::vector<Tensor> miopen_rnn_backward_weight(
     auto handle = getMiopenHandle();
 
     if (fn.rnn.rnn_mode != miopenLSTM) {
-        AT_CHECK(!cx.defined(), "rnn: illegal defined cx for non-LSTM RNN");
+        TORCH_CHECK(!cx.defined(), "rnn: illegal defined cx for non-LSTM RNN");
     }
 
     auto is_input_packed = fn_batch_sizes.size() != 0;
@@ -691,15 +691,15 @@ std::vector<Tensor> miopen_rnn_backward_weight(
     auto input_size = _input_size(fn.tensors);
     auto hidden_size = _hidden_size(fn.rnn, fn.tensors);
 
-    AT_CHECK(fn_train, "miopen RNN backward can only be called in training mode");
+    TORCH_CHECK(fn_train, "miopen RNN backward can only be called in training mode");
 
-    AT_CHECK(input.sizes().equals(input_size),
+    TORCH_CHECK(input.sizes().equals(input_size),
         "Expected input size ", IntArrayRef{input_size}, ", got ", input.sizes());
-    AT_CHECK(!hx.defined() || hx.sizes().equals(hidden_size),
+    TORCH_CHECK(!hx.defined() || hx.sizes().equals(hidden_size),
         "Expected hidden size ", IntArrayRef{hidden_size}, ", got ", hx.sizes());
 
-    AT_CHECK(hx.is_contiguous(), "rnn: hx is not contiguous");
-    AT_CHECK(!cx.defined() || cx.is_contiguous(), "rnn: cx is not contiguous");
+    TORCH_CHECK(hx.is_contiguous(), "rnn: hx is not contiguous");
+    TORCH_CHECK(!cx.defined() || cx.is_contiguous(), "rnn: cx is not contiguous");
 
     auto x = input.contiguous();
     const auto& y = output;
@@ -808,7 +808,7 @@ std::pair<Tensor, hidden_type> _miopen_impl(
     std::tie(hx, cx) = unpack_hidden(hidden);
     int64_t hidden_size = hx.size(2);
 
-    AT_CHECK(_batch_sizes.dim() == 1, "batch_sizes tensor should be 1D");
+    TORCH_CHECK(_batch_sizes.dim() == 1, "batch_sizes tensor should be 1D");
     IntArrayRef batch_sizes { _batch_sizes.data_ptr<int64_t>(), static_cast<size_t>(_batch_sizes.size(0)) };
 
     Tensor dropout_state = at::empty({0}, input.options());
diff --git a/c10/util/Exception.h b/c10/util/Exception.h
index 097d9141cb7..945243b05d5 100644
--- a/c10/util/Exception.h
+++ b/c10/util/Exception.h
@@ -369,9 +369,6 @@ C10_DEPRECATED_MESSAGE("AT_INDEX_ERROR(msg) is deprecated, use TORCH_CHECK_INDEX
 */
 inline void deprecated_AT_INDEX_ERROR() {}
 
-C10_DEPRECATED_MESSAGE("AT_CHECK is deprecated, use TORCH_CHECK instead.")
-inline void deprecated_AT_CHECK() {}
-
 /*
 // Deprecation disabled until we fix sites in our codebase
 C10_DEPRECATED_MESSAGE("AT_ASSERT is deprecated, if you mean to indicate an internal invariant failure, use " \
@@ -390,15 +387,6 @@ inline void deprecated_AT_ASSERTM() {}
 
 }} // namespace c10::detail
 
-// Deprecated alias; this alias was deprecated because it wasn't clear to
-// people that you should use a macro with AT_ prefix inside the torch/csrc
-// directory. Use TORCH_CHECK instead.
-#define AT_CHECK(...) \
-  do { \
-    ::c10::detail::deprecated_AT_CHECK(); \
-    C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(__VA_ARGS__)); \
-  } while (false)
-
 // Deprecated alias; this alias was deprecated because people kept mistakenly
 // using it for user error checking. Use TORCH_INTERNAL_ASSERT or TORCH_CHECK
 // instead. See https://github.com/pytorch/pytorch/issues/20287 for more details.
diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp
index 22acc78b2cb..85add73c57c 100644
--- a/torch/csrc/utils/tensor_new.cpp
+++ b/torch/csrc/utils/tensor_new.cpp
@@ -173,7 +173,7 @@ ScalarType infer_scalar_type(PyObject *obj) {
     switch (torch::tensors::get_default_scalar_type()) {
       case ScalarType::Float: return ScalarType::ComplexFloat;
       case ScalarType::Double: return ScalarType::ComplexDouble;
-      default: AT_CHECK(0, "invalid default scalar type for complex");
+      default: TORCH_CHECK(false, "invalid default scalar type for complex");
     }
   }
   if (THPVariable_Check(obj)) {
diff --git a/torch/lib/c10d/ProcessGroupGloo.cpp b/torch/lib/c10d/ProcessGroupGloo.cpp
index 3c0ecfbf693..64565e2f350 100644
--- a/torch/lib/c10d/ProcessGroupGloo.cpp
+++ b/torch/lib/c10d/ProcessGroupGloo.cpp
@@ -954,7 +954,7 @@ class AsyncSparseAllreduceWork : public ProcessGroupGloo::AsyncWork {
         continue;
       }
       const auto actual = metadata[i].sizes();
-      AT_CHECK(actual == expected, "Sparse dimensions do not match");
+      TORCH_CHECK(actual == expected, "Sparse dimensions do not match");
     }
   }
 
diff --git a/torch/lib/c10d/ProcessGroupNCCL.cpp b/torch/lib/c10d/ProcessGroupNCCL.cpp
index 00644a718c0..cd4da13038f 100644
--- a/torch/lib/c10d/ProcessGroupNCCL.cpp
+++ b/torch/lib/c10d/ProcessGroupNCCL.cpp
@@ -450,7 +450,7 @@ void ProcessGroupNCCL::broadcastUniqueNCCLID(ncclUniqueId* ncclID) {
     store_->set(storeKey, vec);
   } else {
     auto vec = store_->get(storeKey);
-    AT_CHECK(vec.size() == NCCL_UNIQUE_ID_BYTES);
+    TORCH_CHECK(vec.size() == NCCL_UNIQUE_ID_BYTES);
     std::memcpy(ncclID, vec.data(), vec.size());
   }
 }
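
Note on the migration pattern: TORCH_CHECK takes the same arguments as the
AT_CHECK alias it replaces, a boolean condition followed by a variadic
message, and throws c10::Error when the condition is false, so the rewrite
is mechanical. A minimal standalone sketch of the pattern (not part of the
patch; the helper name validate_batch_sizes_dim is hypothetical):

    #include <cstdint>
    #include <c10/util/Exception.h>  // defines TORCH_CHECK

    // Hypothetical helper mirroring the kind of check touched by this patch.
    void validate_batch_sizes_dim(int64_t dim) {
      // Before: AT_CHECK(dim == 1, "batch_sizes tensor should be 1D");
      // After: identical arguments, new macro name. Message parts are
      // concatenated into the c10::Error thrown on failure.
      TORCH_CHECK(dim == 1, "batch_sizes tensor should be 1D, got ", dim);
    }

The one non-mechanical case is an unconditional failure: AT_CHECK(0, ...)
becomes TORCH_CHECK(false, ...), as in the tensor_new.cpp hunk above. A bare
TORCH_CHECK(cond) with no message, as in the ProcessGroupNCCL.cpp hunk, is
also valid and reports a default error string.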