From b32b002a6ea879e506453b09a4b206632e530abf Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Wed, 23 Apr 2025 11:14:35 +0000
Subject: [PATCH] [BE] Replace `std::runtime_error` with `TORCH_CHECK` [1/N] (#151880)

Part of: #148114

Pull Request resolved: https://github.com/pytorch/pytorch/pull/151880
Approved by: https://github.com/albanD, https://github.com/Skylion007, https://github.com/cyyever
---
 aten/src/ATen/Context.cpp                |  2 +-
 aten/src/ATen/Context.h                  |  3 +-
 aten/src/ATen/autocast_mode.h            |  6 +-
 aten/src/ATen/core/Dict_inl.h            |  3 +-
 aten/src/ATen/core/Tensor.cpp            |  5 +-
 aten/src/ATen/core/function_schema_inl.h |  2 +-
 aten/src/ATen/miopen/Descriptors.cpp     | 16 ++++-
 aten/src/ATen/mkl/Descriptors.h          | 10 ++-
 aten/src/ATen/native/NNPACK.cpp          | 85 ++++++++++++------------
 9 files changed, 70 insertions(+), 62 deletions(-)

diff --git a/aten/src/ATen/Context.cpp b/aten/src/ATen/Context.cpp
index b5ce540b52a..17be06edc04 100644
--- a/aten/src/ATen/Context.cpp
+++ b/aten/src/ATen/Context.cpp
@@ -694,7 +694,7 @@ void Context::setAllowFP16ReductionCPU(bool b) {
 #else
     if (true)
 #endif
-      throw std::runtime_error("Float16 arithmetic is not supported by the CPU!");
+      TORCH_CHECK(false, "Float16 arithmetic is not supported by the CPU!");
   }
   allow_fp16_reduction_cpu = b;
 }
diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h
index 7d0f4c445f3..6de119c2c63 100644
--- a/aten/src/ATen/Context.h
+++ b/aten/src/ATen/Context.h
@@ -550,7 +550,8 @@ inline size_t getNumGPUs() {
   // devices for a specific device type, add that function to the
   // relevant library (e.g., similar to at::cuda::device_count())
   if (hasCUDA() && hasHIP()) {
-    throw std::runtime_error(
+    TORCH_CHECK(
+        false,
         "Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades "
         "to be CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually "
         "means HIP. Rebuild PyTorch with one or the other disabled.");
diff --git a/aten/src/ATen/autocast_mode.h b/aten/src/ATen/autocast_mode.h
index 56f5e2fe551..2d9e90a8676 100644
--- a/aten/src/ATen/autocast_mode.h
+++ b/aten/src/ATen/autocast_mode.h
@@ -195,7 +195,8 @@ inline DispatchKey get_autocast_dispatch_key_from_device_type(
     case c10::DeviceType::MPS:
       return DispatchKey::AutocastMPS;
     default:
-      throw std::runtime_error(
+      TORCH_CHECK(
+          false,
           "unknown device type for autocast in get_autocast_dispatch_key_from_device_type");
   }
 }
@@ -216,7 +217,8 @@ inline at::ScalarType get_lower_precision_fp_from_device_type(
   if (is_autocast_available(device_type)) {
     return get_autocast_dtype(device_type);
   } else {
-    throw std::runtime_error(
+    TORCH_CHECK(
+        false,
         "unknown device type for autocast in get_lower_precision_fp_from_device_type");
   }
 }
diff --git a/aten/src/ATen/core/Dict_inl.h b/aten/src/ATen/core/Dict_inl.h
index 6261af5fb66..088fec4e85e 100644
--- a/aten/src/ATen/core/Dict_inl.h
+++ b/aten/src/ATen/core/Dict_inl.h
@@ -53,8 +53,7 @@ inline size_t DictKeyHash::operator()(const IValue& ivalue) const {
   } else if (ivalue.isDevice()) {
     return std::hash<c10::Device>()(ivalue.toDevice());
   } else {
-    throw std::runtime_error(
-        "Can't hash IValues with tag '" + ivalue.tagKind() + "'");
+    TORCH_CHECK(false, "Can't hash IValues with tag '", ivalue.tagKind(), "'");
   }
 }
 
diff --git a/aten/src/ATen/core/Tensor.cpp b/aten/src/ATen/core/Tensor.cpp
index 43474515db0..246418ad7ce 100644
--- a/aten/src/ATen/core/Tensor.cpp
+++ b/aten/src/ATen/core/Tensor.cpp
@@ -51,9 +51,8 @@ TensorBase TensorBase::to(
 }
 
 void TensorBase::enforce_invariants() {
-  if (impl_.get() == nullptr) {
-    throw std::runtime_error("TensorImpl with nullptr is not supported");
-  }
+  TORCH_CHECK(
+      impl_.get() != nullptr, "TensorImpl with nullptr is not supported");
   // Following line throws if the method is not a POD data type or is not
   // supported by ATen
   scalar_type();
diff --git a/aten/src/ATen/core/function_schema_inl.h b/aten/src/ATen/core/function_schema_inl.h
index f4d5ee6a3fd..0c0715c29ab 100644
--- a/aten/src/ATen/core/function_schema_inl.h
+++ b/aten/src/ATen/core/function_schema_inl.h
@@ -71,7 +71,7 @@ inline void FunctionSchema::checkAndNormalizeInputs(
     for(const auto& k : kwargs) {
      names.emplace_back(k.first);
     }
-    throw std::runtime_error(findErrorInKwargs(names));
+    TORCH_CHECK(false, findErrorInKwargs(names));
   }
 }
 
diff --git a/aten/src/ATen/miopen/Descriptors.cpp b/aten/src/ATen/miopen/Descriptors.cpp
index 7c12378798a..08c09b88f99 100644
--- a/aten/src/ATen/miopen/Descriptors.cpp
+++ b/aten/src/ATen/miopen/Descriptors.cpp
@@ -17,7 +17,9 @@ inline miopenDataType_t getDataType(const at::Tensor& t) {
   } else if (scalar_type == at::kBFloat16) {
     return miopenBFloat16;
   } else {
-    throw std::runtime_error("TensorDescriptor only supports float, half and bfloat16 tensors");
+    TORCH_CHECK(
+        false,
+        "TensorDescriptor only supports float, half and bfloat16 tensors");
   }
 }
 
@@ -35,7 +37,11 @@ void TensorDescriptor::set(miopenDataType_t datatype, IntArrayRef t_sizes, IntAr
   if (dim > MIOPEN_DIM_MAX || pad > MIOPEN_DIM_MAX)
 #define _STR(X) #X
 #define STR(X) _STR(X)
-    throw std::runtime_error("MIOpen supports only up to " STR(MIOPEN_DIM_MAX) " dimensions");
+    TORCH_CHECK(
+        false,
+        "MIOpen supports only up to ",
+        STR(MIOPEN_DIM_MAX),
+        " dimensions");
 #undef _STR
 #undef STR
   int size[MIOPEN_DIM_MAX];
@@ -96,7 +102,11 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
   if (dim > static_cast<int64_t>(MIOPEN_DIM_MAX) || pad > static_cast<int64_t>(MIOPEN_DIM_MAX)) {
 #define _STR(X) #X
 #define STR(X) _STR(X)
-    throw std::runtime_error("MIOpen supports only up to " STR(MIOPEN_DIM_MAX) " dimensions");
+    TORCH_CHECK(
+        false,
+        "MIOpen supports only up to ",
+        STR(MIOPEN_DIM_MAX),
+        " dimensions");
 #undef _STR
 #undef STR
   }
diff --git a/aten/src/ATen/mkl/Descriptors.h b/aten/src/ATen/mkl/Descriptors.h
index 4a006639a7f..19f4df7cf64 100644
--- a/aten/src/ATen/mkl/Descriptors.h
+++ b/aten/src/ATen/mkl/Descriptors.h
@@ -17,9 +17,8 @@ struct DftiDescriptorDeleter {
 class DftiDescriptor {
 public:
   void init(DFTI_CONFIG_VALUE precision, DFTI_CONFIG_VALUE signal_type, MKL_LONG signal_ndim, MKL_LONG* sizes) {
-    if (desc_ != nullptr) {
-      throw std::runtime_error("DFTI DESCRIPTOR can only be initialized once");
-    }
+    TORCH_CHECK(
+        desc_ == nullptr, "DFTI DESCRIPTOR can only be initialized once");
     DFTI_DESCRIPTOR *raw_desc;
     if (signal_ndim == 1) {
       MKL_DFTI_CHECK(DftiCreateDescriptor(&raw_desc, precision, signal_type, 1, sizes[0]));
@@ -30,9 +29,8 @@ public:
   }
 
   DFTI_DESCRIPTOR *get() const {
-    if (desc_ == nullptr) {
-      throw std::runtime_error("DFTI DESCRIPTOR has not been initialized");
-    }
+    TORCH_CHECK(
+        desc_ != nullptr, "DFTI DESCRIPTOR has not been initialized");
     return desc_.get();
   }
 
diff --git a/aten/src/ATen/native/NNPACK.cpp b/aten/src/ATen/native/NNPACK.cpp
index 9a5ae286666..be6266b17fc 100644
--- a/aten/src/ATen/native/NNPACK.cpp
+++ b/aten/src/ATen/native/NNPACK.cpp
@@ -25,8 +25,7 @@ at::Tensor _nnpack_spatial_convolution(
     const Tensor& weight,
     const std::optional<Tensor>& bias_opt,
     const IntArrayRef padding,
     const IntArrayRef stride) {
-  throw std::runtime_error(
-      "nnpack_spatial_convolution: ATen not compiled with NNPACK support");
+  TORCH_CHECK(false, "nnpack_spatial_convolution: ATen not compiled with NNPACK support");
 }
 
 bool _nnpack_available() {
@@ -143,51 +142,51 @@ Tensor _nnpack_spatial_convolution(
       input.options());
 
   // Our input Tensor must be in the form N,C,H,W
-  if (input.ndimension() != 4) {
-    throw std::runtime_error(
-        "NNPack convolutionOutput expects 4D input Tensor N,C,H,W");
-  }
+  TORCH_CHECK(
+      input.ndimension() == 4,
+      "NNPack convolutionOutput expects 4D input Tensor N,C,H,W");
+
   // Our weight Tensor must be in the form oC,iC,kH,kW
-  if (weight.ndimension() != 4) {
-    throw std::runtime_error(
-        "NNPack convolutionOutput expects 4D weight Tensor oC,iC,kH,kW");
-  }
+  TORCH_CHECK(
+      weight.ndimension() == 4,
+      "NNPack convolutionOutput expects 4D weight Tensor oC,iC,kH,kW");
+
   // Our output Tensor must be in the form N,oC,oH,oW
-  if (output.ndimension() != 4) {
-    throw std::runtime_error(
-        "NNPack convolutionOutput expects 4D output Tensor N,oC,oH,oW");
-  }
+  TORCH_CHECK(
+      output.ndimension() == 4,
+      "NNPack convolutionOutput expects 4D output Tensor N,oC,oH,oW");
 
   // Some basic shape checking, not comprehensive
-  if (input.size(1) != weight.size(1)) {
-    std::stringstream err;
-    err << "Mismatch between number of input channels in input Tensor ("
-        << input.size(1) << ") and weight Tensor (" << weight.size(1)
-        << ") in NNPack convolutionOutput";
-    throw std::runtime_error(err.str());
-  }
-  if (weight.size(0) != output.size(1)) {
-    std::stringstream err;
-    err << "Mismatch between number of output channels in weight Tensor ("
-        << weight.size(0) << ") and output Tensor (" << output.size(1)
-        << ") in NNPack convolutionOutput";
-    throw std::runtime_error(err.str());
-  }
-  if (input.size(0) != output.size(0)) {
-    std::stringstream err;
-    err << "Mismatch between batch size in input Tensor (" << input.size(0)
<< ") and output Tensor (" << output.size(0) - << ") in NNPack convolutionOutput"; - throw std::runtime_error(err.str()); - } + TORCH_CHECK( + input.size(1) == weight.size(1), + "Mismatch between number of input channels in input Tensor (", + input.size(1), + ") and weight Tensor (", + weight.size(1), + ") in NNPack convolutionOutput"); + + TORCH_CHECK( + weight.size(0) == output.size(1), + "Mismatch between number of output channels in weight Tensor (", + weight.size(0), + ") and output Tensor (", + output.size(1), + ") in NNPack convolutionOutput"); + + TORCH_CHECK( + input.size(0) == output.size(0), + "Mismatch between batch size in input Tensor (", + input.size(0), + ") and output Tensor (", + output.size(0), + ") in NNPack convolutionOutput"); // All Tensors must be float Tensors if (input.device().type() != kCPU || input.scalar_type() != kFloat || weight.device().type() != kCPU || weight.scalar_type() != kFloat || output.device().type() != kCPU || output.scalar_type() != kFloat || (bias.defined() && (bias.device().type() != kCPU || bias.scalar_type() != kFloat))) { - throw std::runtime_error( - "Mismatched Tensor types in NNPack convolutionOutput"); + TORCH_CHECK(false, "Mismatched Tensor types in NNPack convolutionOutput"); } const auto algorithm = nnp_convolution_algorithm_auto; @@ -281,9 +280,9 @@ Tensor _nnpack_spatial_convolution( auto size_and_allocate_ws = [&]() { // Run a single pass to get the size of memory workspace buffer const auto status = compute(batch_size); - if (status != nnp_status_success) { - throw std::runtime_error("NNPACK SpatialConvolution_updateOutput failed"); - } + TORCH_CHECK( + status == nnp_status_success, + "NNPACK SpatialConvolution_updateOutput failed"); workspace.allocate(); }; @@ -304,9 +303,9 @@ Tensor _nnpack_spatial_convolution( status = compute(batch_size); } - if (status != nnp_status_success) { - throw std::runtime_error("NNPACK SpatialConvolution_updateOutput failed"); - } + TORCH_CHECK( + status == nnp_status_success, + "NNPACK SpatialConvolution_updateOutput failed"); return output; }