[CodeClean] Replace std::runtime_error with TORCH_CHECK (#165119)
As the title states.

**Changes**:
- torch/csrc/inductor (Part 2)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165119
Approved by: https://github.com/janeyx99
ghstack dependencies: #165139
Parent: fcd5f8c352
Commit: 398775a43e
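The whole diff applies a single pattern: a manual `if`/`throw std::runtime_error` branch collapses into one `TORCH_CHECK` call (in shim `.cpp` code) or `STD_TORCH_CHECK` call (in the ABI-sensitive runtime headers). A minimal before/after sketch of that pattern — `check_positive_*` is a hypothetical function for illustration, not code from this commit:

```cpp
#include <c10/util/Exception.h> // defines TORCH_CHECK / c10::Error
#include <cstdint>
#include <stdexcept>
#include <string>

// Before: manual branch, manual message building, plain std::runtime_error.
void check_positive_old(int64_t n) {
  if (n <= 0) {
    throw std::runtime_error("expected positive n, got " + std::to_string(n));
  }
}

// After: TORCH_CHECK throws c10::Error (carrying file/line context) when the
// condition is false; trailing arguments are stream-concatenated into the
// message, so no std::string arithmetic is needed.
void check_positive_new(int64_t n) {
  TORCH_CHECK(n > 0, "expected positive n, got ", n);
}
```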
@@ -836,10 +836,9 @@ class AOTInductorModelBase {
   }
 
   void update_constants_array_from_map() {
-    if (!constants_map_) {
-      throw std::runtime_error{
-          "constants_map_ was not ready when constants_ is trying to be constructed from it!"};
-    }
+    STD_TORCH_CHECK(
+        constants_map_,
+        "constants_map_ was not ready when constants_ is trying to be constructed from it!");
     if (!constants_) {
       constants_ =
           std::make_shared<std::vector<ConstantHandle>>(constants_info_.size());
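Note that the runtime headers in these hunks use `STD_TORCH_CHECK` rather than `TORCH_CHECK`. A plausible sketch of what such a macro does, assuming (this is not taken from the diff) that the `STD_` variant avoids any `c10` dependency by throwing a standard exception — the real PyTorch definition is variadic and may differ:

```cpp
#include <sstream>
#include <stdexcept>

// Hypothetical sketch of an STD_TORCH_CHECK-style macro: on a false condition
// it throws a plain std::runtime_error with source location attached,
// avoiding any libtorch/c10 dependency. Single-message form for brevity.
#define SKETCH_STD_TORCH_CHECK(cond, msg)                  \
  do {                                                     \
    if (!(cond)) {                                         \
      std::ostringstream oss;                              \
      oss << __FILE__ << ":" << __LINE__ << ": " << (msg); \
      throw std::runtime_error(oss.str());                 \
    }                                                      \
  } while (0)

// Hypothetical usage, mirroring the checks in the hunk above.
void ensure_ready(const void* p) {
  SKETCH_STD_TORCH_CHECK(p != nullptr, "resource was not ready");
}
```

A macro of this shape keeps the AOTInductor runtime headers self-contained while preserving the file/line context that a bare `throw std::runtime_error` lacked.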
@@ -875,9 +874,7 @@ class AOTInductorModelBase {
   /// Returns true if the model is complete.
   bool is_finished() {
 #ifdef USE_CUDA
-    if (!run_finished_) {
-      throw std::runtime_error{"Model CUDA event was not initialized"};
-    }
+    STD_TORCH_CHECK(run_finished_, "Model CUDA event was not initialized");
 
     auto event_status = cudaEventQuery(*run_finished_);
     if (event_status == cudaSuccess) {
@@ -886,13 +883,13 @@ class AOTInductorModelBase {
       return false;
     }
 
-    throw std::runtime_error(
-        std::string("The model did not finish successfully. Error: ") +
+    STD_TORCH_CHECK(
+        false,
+        "The model did not finish successfully. Error: ",
         cudaGetErrorString(cudaGetLastError()));
 #elif defined(USE_XPU)
-    if (!run_finished_) {
-      throw std::runtime_error{"Model XPU event was not initialized"};
-    }
+    STD_TORCH_CHECK(run_finished_, "Model XPU event was not initialized");
+
     using namespace sycl::info;
     return (*run_finished_)->get_info<event::command_execution_status>() ==
         event_command_status::complete;
@@ -904,19 +901,14 @@ class AOTInductorModelBase {
   /// Synchronizes completion event.
   void wait_for_completion() {
+    STD_TORCH_CHECK(run_finished_, "Model event was not initialized");
 #ifdef USE_CUDA
-    if (!run_finished_) {
-      throw std::runtime_error{"Model event was not initialized"};
-    }
 
     AOTI_RUNTIME_CUDA_CHECK(cudaEventSynchronize(*run_finished_));
 #endif // USE_CUDA
 
 #ifdef USE_XPU
-    if (!run_finished_) {
-      throw std::runtime_error{"Model event was not initialized"};
-    }
     (*run_finished_)->wait_and_throw();
-#endif
+#endif // USE_XPU
   }
 
  protected:
@@ -123,8 +123,10 @@ class AOTInductorModelContainer {
       constants_folding_lk.unlock();
       model_lk.lock();
     } else if (const_folded != ConstantState::FOLDED) {
-      throw std::runtime_error(
-          "Unknown constant state: " + toStringConstantState(constant_folded_));
+      STD_TORCH_CHECK(
+          false,
+          "Unknown constant state: ",
+          toStringConstantState(constant_folded_));
     }
 
     try {
@@ -167,8 +169,10 @@ class AOTInductorModelContainer {
           /* validate_full_update = */ false);
       const_folded = ConstantState::FOLDED;
     } else if (constant_folded_ != ConstantState::FOLDED) {
-      throw std::runtime_error(
-          "Unknown constant state: " + toStringConstantState(constant_folded_));
+      STD_TORCH_CHECK(
+          false,
+          "Unknown constant state: ",
+          toStringConstantState(constant_folded_));
     }
 
     model->run_single_threaded(
@@ -202,56 +206,56 @@ class AOTInductorModelContainer {
   }
 
   size_t num_constants() const {
-    if (this->num_models() == 0) {
-      throw std::runtime_error("No available models in container!");
-    }
+    STD_TORCH_CHECK(
+        this->num_models() != 0, "No available models in container!");
+
     return models_[0]->num_constants();
   }
 
   // retrieve the constant name of constants_info_[idx]
   const char* constant_name(size_t idx) const {
-    if (this->num_models() == 0) {
-      throw std::runtime_error("No available models in container!");
-    }
+    STD_TORCH_CHECK(
+        this->num_models() != 0, "No available models in container!");
+
     return models_[0]->constant_name(static_cast<int64_t>(idx));
   }
 
   // retrieve original FQN of constants_info_[idx]
   const char* constant_original_fqn(size_t idx) const {
-    if (this->num_models() == 0) {
-      throw std::runtime_error("No available models in container!");
-    }
+    STD_TORCH_CHECK(
+        this->num_models() != 0, "No available models in container!");
+
     return models_[0]->constant_original_fqn(static_cast<int64_t>(idx));
   }
 
   // retrieve whether constant is from folded of constants_info_[idx]
   bool constant_from_folded(size_t idx) const {
-    if (this->num_models() == 0) {
-      throw std::runtime_error("No available models in container!");
-    }
+    STD_TORCH_CHECK(
+        this->num_models() != 0, "No available models in container!");
+
     return models_[0]->constant_from_folded(static_cast<int64_t>(idx));
   }
 
   size_t constant_data_size(size_t idx) const {
-    if (this->num_models() == 0) {
-      throw std::runtime_error("No available models in container!");
-    }
+    STD_TORCH_CHECK(
+        this->num_models() != 0, "No available models in container!");
+
     return models_[0]->constant_data_size(static_cast<int64_t>(idx));
   }
 
   // retrieve type of constants_info_[idx]
   int32_t constant_type(size_t idx) const {
-    if (this->num_models() == 0) {
-      throw std::runtime_error("No available models in container!");
-    }
+    STD_TORCH_CHECK(
+        this->num_models() != 0, "No available models in container!");
+
     return models_[0]->constant_type(static_cast<int64_t>(idx));
   }
 
   // retrieve dtype of constants_info_[idx]
   int32_t constant_dtype(size_t idx) const {
-    if (this->num_models() == 0) {
-      throw std::runtime_error("No available models in container!");
-    }
+    STD_TORCH_CHECK(
+        this->num_models() != 0, "No available models in container!");
+
     return models_[0]->constant_dtype(static_cast<int64_t>(idx));
   }
 
@@ -383,9 +387,12 @@ class AOTInductorModelContainer {
                     << " in model, but not provided by user!\n";
           continue;
         }
-        throw std::runtime_error(
-            std::string("Cannot find constants ") + constant_name +
-            std::string(" in constants_map!"));
+
+        STD_TORCH_CHECK(
+            false,
+            "Cannot find constants ",
+            constant_name,
+            " in constants_map!");
       }
     }
   }
@@ -395,9 +402,8 @@ class AOTInductorModelContainer {
       std::unordered_map<std::string, AtenTensorHandle>&& constants_map,
       bool use_inactive,
       bool validate_full_update) {
-    if (this->num_models() == 0) {
-      throw std::runtime_error("No model available in container!");
-    }
+    STD_TORCH_CHECK(
+        this->num_models() != 0, "No available models in container!");
     if (validate_full_update) {
       assert_all_constants(constants_map);
     }
@@ -443,9 +449,9 @@ class AOTInductorModelContainer {
       bool use_inactive,
       bool validate_full_update,
       bool user_managed = false) {
-    if (this->num_models() == 0) {
-      throw std::runtime_error("No model available in container!");
-    }
+    STD_TORCH_CHECK(
+        this->num_models() != 0, "No model available in container!");
+
     if (validate_full_update) {
      assert_all_constants(constants_map);
     }
@@ -7,7 +7,7 @@ namespace torch::aot_inductor {
 
 template <typename T>
 inline RAIIAtenTensorHandle scalar_to_tensor_handle(T value) {
-  throw std::runtime_error("Unsupported scalar_to_tensor_handle");
+  STD_TORCH_CHECK(false, "Unsupported scalar_to_tensor_handle");
 }
 
 // Specialize for supported C++ primitive types
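The primary template above exists only to fail loudly when no specialization matches; real conversions live in the per-type specializations that follow it. A self-contained sketch of the same idiom with hypothetical names (`scalar_repr` is not from the diff):

```cpp
#include <cstdint>
#include <stdexcept>
#include <string>

// Primary template: reached only for types without a specialization, so it
// unconditionally fails at runtime (mirroring STD_TORCH_CHECK(false, ...)).
template <typename T>
std::string scalar_repr(T) {
  throw std::runtime_error("Unsupported scalar_repr");
}

// Specialization for a supported primitive type does the actual work.
template <>
std::string scalar_repr<int64_t>(int64_t value) {
  return std::to_string(value);
}
```

A `static_assert` in the primary template would move the failure to compile time; presumably the runtime check is kept so the primary template stays instantiable in generic code paths that are never executed.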
@@ -11,11 +11,11 @@ template <>
 struct ThreadLocalCachedOutputTensor<RAIIAtenTensorHandle> {
   explicit ThreadLocalCachedOutputTensor(const RAIIAtenTensorHandle&) {}
   void copy_data_from(const RAIIAtenTensorHandle& handle) {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 
   AtenTensorHandle tensor() const {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 };
 
@@ -23,11 +23,11 @@ template <>
 struct ThreadLocalCachedOutputTensor<AtenTensorHandle> {
   explicit ThreadLocalCachedOutputTensor(const AtenTensorHandle&) {}
   void copy_data_from(const AtenTensorHandle& handle) {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 
   AtenTensorHandle tensor() const {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 };
 
@@ -35,11 +35,11 @@ template <>
 struct ThreadLocalCachedOutputTensor<ConstantHandle> {
   explicit ThreadLocalCachedOutputTensor(const ConstantHandle&) {}
   void copy_data_from(const ConstantHandle& handle) {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 
   AtenTensorHandle tensor() const {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 };
 
@@ -92,18 +92,18 @@ struct ThreadLocalCachedOutputArray;
 template <>
 struct ThreadLocalCachedOutputArray<RAIIAtenTensorHandle> {
   explicit ThreadLocalCachedOutputArray(const RAIIAtenTensorHandle&) {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 
   // Not supported yet! We would need to put contiguous() or
   // expect_contiguous() into the ABI.
   void copy_data_from(const RAIIAtenTensorHandle&) {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 
   template <typename U>
   ArrayRefTensor<U> arrayref_tensor() const {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 };
 
@@ -111,18 +111,18 @@ struct ThreadLocalCachedOutputArray<RAIIAtenTensorHandle> {
 template <>
 struct ThreadLocalCachedOutputArray<ConstantHandle> {
   explicit ThreadLocalCachedOutputArray(const ConstantHandle&) {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 
   // Not supported yet! We would need to put contiguous() or
   // expect_contiguous() into the ABI.
   void copy_data_from(const ConstantHandle&) {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 
   template <typename U>
   ArrayRefTensor<U> arrayref_tensor() const {
-    throw std::runtime_error("can't happen");
+    STD_TORCH_CHECK(false, "can't happen");
   }
 };
 
@@ -1341,13 +1341,14 @@ AOTITorchError aoti_torch_proxy_executor_call_function(
     int num_tensors,
     AtenTensorHandle* flatten_tensor_args) {
   AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE({
-    if (!proxy_executor) {
-      throw std::runtime_error(
-          "Unable to find a proxy executor to run custom ops. Please check if "
-          "there is a json file generated in the same directory as the so, or use "
-          "torch._inductor.aoti_compile_and_package to package everything into a "
-          "PT2 artifact.");
-    }
+    TORCH_CHECK(
+        proxy_executor != nullptr,
+        "Unable to find a proxy executor to run custom ops.",
+        "Please check if there is a json file generated",
+        "in the same directory as the so,",
+        "or use torch._inductor.aoti_compile_and_package",
+        "to package everything into a PT2 artifact.");
+
     ProxyExecutor* executor = reinterpret_cast<ProxyExecutor*>(proxy_executor);
     executor->call_function(
         extern_node_index,
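Two things are worth noting in this hunk. First, throwing from `TORCH_CHECK` is safe here because the surrounding `AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE` block catches whatever the body throws and maps it to an `AOTITorchError` return code before it can cross the C ABI boundary of the shim. A rough sketch of that idiom with stand-in names (the real macro's shape is an assumption, not taken from this diff):

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>

using AOTITorchError = int32_t;
constexpr AOTITorchError kSuccess = 0; // stands in for AOTI_TORCH_SUCCESS
constexpr AOTITorchError kFailure = 1; // stands in for AOTI_TORCH_FAILURE

// Hypothetical sketch: run the body, translate any exception into an error
// code so nothing propagates across the C ABI.
#define SKETCH_CONVERT_EXCEPTION_TO_ERROR_CODE(...) \
  try {                                             \
    __VA_ARGS__                                     \
    return kSuccess;                                \
  } catch (const std::exception& e) {               \
    std::cerr << e.what() << '\n';                  \
    return kFailure;                                \
  } catch (...) {                                   \
    return kFailure;                                \
  }

AOTITorchError sketch_shim_call(bool ok) {
  SKETCH_CONVERT_EXCEPTION_TO_ERROR_CODE({
    if (!ok) throw std::runtime_error("boom"); // becomes kFailure, not a throw
  })
}
```

Second, `TORCH_CHECK` concatenates its message arguments without inserting separators, so splitting the original single literal at the commas as above yields a message with the sentences run together ("…custom ops.Please check if…"); the original string literals carried the spacing themselves.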
@@ -10,9 +10,7 @@ AOTITorchError aoti_torch_mps_set_arg_tensor(
     AtenTensorHandle tensor) {
   AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE({
     auto t = tensor_handle_to_tensor_pointer(tensor);
-    if (t == nullptr) {
-      throw std::runtime_error("Tensor is null.");
-    }
+    TORCH_CHECK(t != nullptr, "Tensor is null.");
     auto func = reinterpret_cast<at::native::mps::MetalKernelFunction*>(handle);
     func->setArg(idx, *t);
   });
@@ -92,13 +92,11 @@ inline void assert_inf_and_nan(
     const std::string& tensor_name,
     at::Tensor& check_tensor) {
   auto isnan_tensor = check_tensor.isnan();
-  if (isnan_tensor.any().item<bool>()) {
-    throw std::runtime_error("At least one NaN in " + tensor_name);
-  }
+  TORCH_CHECK(
+      !isnan_tensor.any().item<bool>(), "At least one NaN in ", tensor_name);
   auto isinf_tensor = check_tensor.isinf();
-  if (isinf_tensor.any().item<bool>()) {
-    throw std::runtime_error("At least one INF in " + tensor_name);
-  }
+  TORCH_CHECK(
+      !isinf_tensor.any().item<bool>(), "At least one INF in ", tensor_name);
 }
 
 // utility functions to convert a pointer to an optional value
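A final property that makes rewrites like this one cheap: `TORCH_CHECK` only evaluates its message arguments when the condition is false, so the message assembly costs nothing on the happy path. An illustrative usage sketch mirroring the `assert_inf_and_nan` pattern above (the function and names are hypothetical, not from the diff):

```cpp
#include <torch/torch.h> // libtorch; provides at::Tensor and TORCH_CHECK
#include <string>

// Illustrative only: same shape as assert_inf_and_nan in the hunk above.
void check_finite(const std::string& name, const at::Tensor& t) {
  // Message concatenation happens only if a check actually fails.
  TORCH_CHECK(!t.isnan().any().item<bool>(), "At least one NaN in ", name);
  TORCH_CHECK(!t.isinf().any().item<bool>(), "At least one INF in ", name);
}

int main() {
  check_finite("weights", torch::randn({4, 4})); // passes silently
}
```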