Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-06 12:20:52 +01:00)
[BE] Replace std::runtime_error with TORCH_CHECK [1/N] (#151880)
Part of: #148114

Pull Request resolved: https://github.com/pytorch/pytorch/pull/151880
Approved by: https://github.com/albanD, https://github.com/Skylion007, https://github.com/cyyever
This commit is contained in:
parent 6d28d61323
commit b32b002a6e
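For context: TORCH_CHECK(cond, msg...) is the c10 assertion macro. When cond is false it throws a c10::Error whose message is produced by streaming the remaining arguments together, along with source-location information. A minimal sketch of the pattern in plain C++17 follows; MY_CHECK and str_cat are hypothetical stand-ins, not the real implementation in c10/util/Exception.h.

    #include <sstream>
    #include <stdexcept>
    #include <string>

    // Stream every message piece into one string (C++17 fold expression).
    template <typename... Args>
    std::string str_cat(const Args&... args) {
      std::ostringstream oss;
      (oss << ... << args);
      return oss.str();
    }

    // Throw only when the condition fails; the message pieces are joined
    // only on the failure path. The real macro throws c10::Error instead.
    #define MY_CHECK(cond, ...)                           \
      do {                                                \
        if (!(cond)) {                                    \
          throw std::runtime_error(str_cat(__VA_ARGS__)); \
        }                                                 \
      } while (0)

    // Usage, mirroring the hunks below:
    //   MY_CHECK(false, "Float16 arithmetic is not supported by the CPU!");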

@@ -694,7 +694,7 @@ void Context::setAllowFP16ReductionCPU(bool b) {
 #else
     if (true)
 #endif
-      throw std::runtime_error("Float16 arithmetic is not supported by the CPU!");
+      TORCH_CHECK(false, "Float16 arithmetic is not supported by the CPU!");
   }
   allow_fp16_reduction_cpu = b;
 }
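A caller-visible side effect of hunks like the one above: TORCH_CHECK throws c10::Error, which derives from std::exception but not from std::runtime_error. Catch sites written against std::exception keep working; ones written against std::runtime_error stop matching. A small sketch (some_checked_op is a hypothetical function that uses TORCH_CHECK):

    try {
      some_checked_op();
    } catch (const std::runtime_error& e) {
      // matched before this commit; c10::Error no longer lands here
    } catch (const std::exception& e) {
      // matches both the old and the new exception type; for c10::Error,
      // e.what() also carries the source location recorded by the macro
    }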

@@ -550,7 +550,8 @@ inline size_t getNumGPUs() {
   // devices for a specific device type, add that function to the
   // relevant library (e.g., similar to at::cuda::device_count())
   if (hasCUDA() && hasHIP()) {
-    throw std::runtime_error(
+    TORCH_CHECK(
+        false,
         "Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades "
         "to be CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually "
         "means HIP. Rebuild PyTorch with one or the other disabled.");

@@ -195,7 +195,8 @@ inline DispatchKey get_autocast_dispatch_key_from_device_type(
     case c10::DeviceType::MPS:
       return DispatchKey::AutocastMPS;
     default:
-      throw std::runtime_error(
+      TORCH_CHECK(
+          false,
           "unknown device type for autocast in get_autocast_dispatch_key_from_device_type");
   }
 }

@@ -216,7 +217,8 @@ inline at::ScalarType get_lower_precision_fp_from_device_type(
   if (is_autocast_available(device_type)) {
     return get_autocast_dtype(device_type);
   } else {
-    throw std::runtime_error(
+    TORCH_CHECK(
+        false,
         "unknown device type for autocast in get_lower_precision_fp_from_device_type");
   }
 }

@@ -53,8 +53,7 @@ inline size_t DictKeyHash::operator()(const IValue& ivalue) const {
   } else if (ivalue.isDevice()) {
     return std::hash<Device>()(ivalue.toDevice());
   } else {
-    throw std::runtime_error(
-        "Can't hash IValues with tag '" + ivalue.tagKind() + "'");
+    TORCH_CHECK(false, "Can't hash IValues with tag '", ivalue.tagKind(), "'");
   }
 }
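The hunk above also changes how the message is assembled: TORCH_CHECK takes its message as a comma-separated list and stringifies each piece itself, so the std::string concatenation with operator+ disappears. In terms of the MY_CHECK sketch above (names hypothetical), the rewrite reads:

    // operator+ built intermediate std::string temporaries; the variadic
    // form streams each piece directly into one buffer instead.
    std::string tag = "Tensor";  // stand-in for ivalue.tagKind()
    MY_CHECK(false, "Can't hash IValues with tag '", tag, "'");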

@@ -51,9 +51,8 @@ TensorBase TensorBase::to(
 }

 void TensorBase::enforce_invariants() {
-  if (impl_.get() == nullptr) {
-    throw std::runtime_error("TensorImpl with nullptr is not supported");
-  }
+  TORCH_CHECK(
+      impl_.get() != nullptr, "TensorImpl with nullptr is not supported");
   // Following line throws if the method is not a POD data type or is not
   // supported by ATen
   scalar_type();
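Rewrites like the one above invert the guard: the old code tested the failure condition before throwing, while TORCH_CHECK asserts the condition that must hold. Getting the negation wrong flips the check silently, so this is the main thing to verify when reviewing this commit. Schematically, in terms of the MY_CHECK sketch (enforce is hypothetical):

    // before: if (p == nullptr) { throw std::runtime_error("..."); }
    // after:  assert the non-failure condition instead
    void enforce(const void* p) {
      MY_CHECK(p != nullptr, "TensorImpl with nullptr is not supported");
    }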

@@ -71,7 +71,7 @@ inline void FunctionSchema::checkAndNormalizeInputs(
     for(const auto& k : kwargs) {
       names.emplace_back(k.first);
     }
-    throw std::runtime_error(findErrorInKwargs(names));
+    TORCH_CHECK(false, findErrorInKwargs(names));
   }
 }

@@ -17,7 +17,9 @@ inline miopenDataType_t getDataType(const at::Tensor& t) {
   } else if (scalar_type == at::kBFloat16) {
     return miopenBFloat16;
   } else {
-    throw std::runtime_error("TensorDescriptor only supports float, half and bfloat16 tensors");
+    TORCH_CHECK(
+        false,
+        "TensorDescriptor only supports float, half and bfloat16 tensors");
   }
 }

@@ -35,7 +37,11 @@ void TensorDescriptor::set(miopenDataType_t datatype, IntArrayRef t_sizes, IntAr
   if (dim > MIOPEN_DIM_MAX || pad > MIOPEN_DIM_MAX)
 #define _STR(X) #X
 #define STR(X) _STR(X)
-    throw std::runtime_error("MIOpen supports only up to " STR(MIOPEN_DIM_MAX) " dimensions");
+    TORCH_CHECK(
+        false,
+        "MIOpen supports only up to ",
+        STR(MIOPEN_DIM_MAX),
+        " dimensions");
 #undef _STR
 #undef STR
   int size[MIOPEN_DIM_MAX];

@@ -96,7 +102,11 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
   if (dim > static_cast<int64_t>(MIOPEN_DIM_MAX) || pad > static_cast<int64_t>(MIOPEN_DIM_MAX)) {
 #define _STR(X) #X
 #define STR(X) _STR(X)
-    throw std::runtime_error("MIOpen supports only up to " STR(MIOPEN_DIM_MAX) " dimensions");
+    TORCH_CHECK(
+        false,
+        "MIOpen supports only up to ",
+        STR(MIOPEN_DIM_MAX),
+        " dimensions");
 #undef _STR
 #undef STR
 }
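Both MIOpen hunks keep the local _STR/STR pair. This is the standard two-level stringification idiom: STR expands its argument first, then _STR turns the expanded token into a string literal, so STR(MIOPEN_DIM_MAX) yields the numeric limit rather than the text "MIOPEN_DIM_MAX". A self-contained sketch, with DIM_LIMIT as a hypothetical stand-in for MIOPEN_DIM_MAX:

    #define _STR(X) #X
    #define STR(X) _STR(X)

    #define DIM_LIMIT 8  // hypothetical stand-in for MIOPEN_DIM_MAX

    // STR(DIM_LIMIT) expands DIM_LIMIT first, producing "8";
    // _STR(DIM_LIMIT) would stringify the unexpanded token: "DIM_LIMIT".
    static_assert(sizeof(STR(DIM_LIMIT)) == sizeof("8"),
                  "expansion happens before stringification");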

@@ -17,9 +17,8 @@ struct DftiDescriptorDeleter {
 class DftiDescriptor {
  public:
   void init(DFTI_CONFIG_VALUE precision, DFTI_CONFIG_VALUE signal_type, MKL_LONG signal_ndim, MKL_LONG* sizes) {
-    if (desc_ != nullptr) {
-      throw std::runtime_error("DFTI DESCRIPTOR can only be initialized once");
-    }
+    TORCH_CHECK(
+        desc_ == nullptr, "DFTI DESCRIPTOR can only be initialized once");
     DFTI_DESCRIPTOR *raw_desc;
     if (signal_ndim == 1) {
       MKL_DFTI_CHECK(DftiCreateDescriptor(&raw_desc, precision, signal_type, 1, sizes[0]));

@@ -30,9 +29,8 @@ public:
   }

   DFTI_DESCRIPTOR *get() const {
-    if (desc_ == nullptr) {
-      throw std::runtime_error("DFTI DESCRIPTOR has not been initialized");
-    }
+    TORCH_CHECK(
+        desc_ != nullptr, "DFTI DESCRIPTOR has not been initialized");
     return desc_.get();
   }

@@ -25,8 +25,7 @@ at::Tensor _nnpack_spatial_convolution(
     const Tensor& weight, const std::optional<Tensor>& bias_opt,
     const IntArrayRef padding,
     const IntArrayRef stride) {
-  throw std::runtime_error(
-      "nnpack_spatial_convolution: ATen not compiled with NNPACK support");
+  TORCH_CHECK(false, "nnpack_spatial_convolution: ATen not compiled with NNPACK support");
 }

 bool _nnpack_available() {

@@ -143,51 +142,51 @@ Tensor _nnpack_spatial_convolution(
       input.options());

   // Our input Tensor must be in the form N,C,H,W
-  if (input.ndimension() != 4) {
-    throw std::runtime_error(
-        "NNPack convolutionOutput expects 4D input Tensor N,C,H,W");
-  }
+  TORCH_CHECK(
+      input.ndimension() == 4,
+      "NNPack convolutionOutput expects 4D input Tensor N,C,H,W");

   // Our weight Tensor must be in the form oC,iC,kH,kW
-  if (weight.ndimension() != 4) {
-    throw std::runtime_error(
-        "NNPack convolutionOutput expects 4D weight Tensor oC,iC,kH,kW");
-  }
+  TORCH_CHECK(
+      weight.ndimension() == 4,
+      "NNPack convolutionOutput expects 4D weight Tensor oC,iC,kH,kW");

   // Our output Tensor must be in the form N,oC,oH,oW
-  if (output.ndimension() != 4) {
-    throw std::runtime_error(
-        "NNPack convolutionOutput expects 4D output Tensor N,oC,oH,oW");
-  }
+  TORCH_CHECK(
+      output.ndimension() == 4,
+      "NNPack convolutionOutput expects 4D output Tensor N,oC,oH,oW");

   // Some basic shape checking, not comprehensive
-  if (input.size(1) != weight.size(1)) {
-    std::stringstream err;
-    err << "Mismatch between number of input channels in input Tensor ("
-        << input.size(1) << ") and weight Tensor (" << weight.size(1)
-        << ") in NNPack convolutionOutput";
-    throw std::runtime_error(err.str());
-  }
-  if (weight.size(0) != output.size(1)) {
-    std::stringstream err;
-    err << "Mismatch between number of output channels in weight Tensor ("
-        << weight.size(0) << ") and output Tensor (" << output.size(1)
-        << ") in NNPack convolutionOutput";
-    throw std::runtime_error(err.str());
-  }
-  if (input.size(0) != output.size(0)) {
-    std::stringstream err;
-    err << "Mismatch between batch size in input Tensor (" << input.size(0)
-        << ") and output Tensor (" << output.size(0)
-        << ") in NNPack convolutionOutput";
-    throw std::runtime_error(err.str());
-  }
+  TORCH_CHECK(
+      input.size(1) == weight.size(1),
+      "Mismatch between number of input channels in input Tensor (",
+      input.size(1),
+      ") and weight Tensor (",
+      weight.size(1),
+      ") in NNPack convolutionOutput");
+
+  TORCH_CHECK(
+      weight.size(0) == output.size(1),
+      "Mismatch between number of output channels in weight Tensor (",
+      weight.size(0),
+      ") and output Tensor (",
+      output.size(1),
+      ") in NNPack convolutionOutput");
+
+  TORCH_CHECK(
+      input.size(0) == output.size(0),
+      "Mismatch between batch size in input Tensor (",
+      input.size(0),
+      ") and output Tensor (",
+      output.size(0),
+      ") in NNPack convolutionOutput");

   // All Tensors must be float Tensors
   if (input.device().type() != kCPU || input.scalar_type() != kFloat ||
       weight.device().type() != kCPU || weight.scalar_type() != kFloat ||
       output.device().type() != kCPU || output.scalar_type() != kFloat ||
       (bias.defined() && (bias.device().type() != kCPU || bias.scalar_type() != kFloat))) {
-    throw std::runtime_error(
-        "Mismatched Tensor types in NNPack convolutionOutput");
+    TORCH_CHECK(false, "Mismatched Tensor types in NNPack convolutionOutput");
   }

   const auto algorithm = nnp_convolution_algorithm_auto;
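In the hunk above, each std::stringstream block collapses into one call because the macro interleaves literal fragments with streamed values. Using the MY_CHECK sketch from the top of the page, the first shape check is equivalent to (names and tensor accessors as in the surrounding ATen code):

    // Each argument after the condition is streamed into the message in
    // order, so the interleaved literal/value layout survives unchanged.
    MY_CHECK(input.size(1) == weight.size(1),
             "Mismatch between number of input channels in input Tensor (",
             input.size(1), ") and weight Tensor (", weight.size(1),
             ") in NNPack convolutionOutput");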

@@ -281,9 +280,9 @@ Tensor _nnpack_spatial_convolution(
   auto size_and_allocate_ws = [&]() {
     // Run a single pass to get the size of memory workspace buffer
     const auto status = compute(batch_size);
-    if (status != nnp_status_success) {
-      throw std::runtime_error("NNPACK SpatialConvolution_updateOutput failed");
-    }
+    TORCH_CHECK(
+        status == nnp_status_success,
+        "NNPACK SpatialConvolution_updateOutput failed");
     workspace.allocate();
   };

@@ -304,9 +303,9 @@ Tensor _nnpack_spatial_convolution(
     status = compute(batch_size);
   }

-  if (status != nnp_status_success) {
-    throw std::runtime_error("NNPACK SpatialConvolution_updateOutput failed");
-  }
+  TORCH_CHECK(
+      status == nnp_status_success,
+      "NNPACK SpatialConvolution_updateOutput failed");

   return output;
 }