pytorch/torch/csrc/utils/device_lazy_init.cpp
sifengyang 46903d978b fix maybe_initialize_device for custom device. (#121379)
1. fix maybe_initialize_device for custom device.
@wanchaol  @albanD

@albanD  I am very sorry that I have resubmitted this PR from a new e-mail address.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/121379
Approved by: https://github.com/albanD
2024-04-09 16:58:52 +00:00

58 lines
1.6 KiB
C++

#include <c10/core/impl/TorchDispatchModeTLS.h>
#include <torch/csrc/utils/device_lazy_init.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/object_ptr.h>
#include <iostream>
namespace torch::utils {
namespace {
// One flag per device type: true once the backend's Python-side
// `_lazy_init` has completed for that device. Accesses are expected to be
// serialized by the Python GIL (see the comment in device_lazy_init);
// set_requires_device_init can reset a flag to force re-initialization.
std::array<bool, at::COMPILE_TIME_MAX_DEVICE_TYPES> is_initialized{};
} // anonymous namespace
// Lazily runs `torch.<device>._lazy_init()` for the given device type,
// at most once per process (tracked in `is_initialized`).
// Throws python_error if the module import or the `_lazy_init` call fails.
void device_lazy_init(at::DeviceType device_type) {
  pybind11::gil_scoped_acquire g;
  // Protected by the GIL. We don't use call_once because under ASAN it
  // has a buggy implementation that deadlocks if an instance throws an
  // exception. In any case, call_once isn't necessary, because we
  // have taken a lock.
  const auto device_index = static_cast<int>(device_type);
  if (is_initialized[device_index]) {
    return;
  }

  // Under a FakeTensor dispatch mode, no real device work should happen,
  // so skip initialization entirely (and leave the flag unset).
  if (c10::impl::TorchDispatchModeTLS::get_mode(
          c10::impl::TorchDispatchModeKey::FAKE)) {
    return;
  }

  const std::string module_name =
      "torch." + at::DeviceTypeName(device_type, true);
  THPObjectPtr device_module(PyImport_ImportModule(module_name.c_str()));
  if (!device_module) {
    throw python_error();
  }

  // Custom (PrivateUse1) backends are not required to expose _lazy_init;
  // when it is absent there is nothing to do. The flag is deliberately
  // left unset so a later registration of _lazy_init would still run.
  if (device_type == at::DeviceType::PrivateUse1 &&
      PyObject_HasAttrString(device_module.get(), "_lazy_init") != 1) {
    return;
  }

  THPObjectPtr result(
      PyObject_CallMethod(device_module.get(), "_lazy_init", ""));
  if (!result) {
    throw python_error();
  }

  is_initialized[device_index] = true;
}
// Overrides the lazy-init bookkeeping for a device type. Passing
// value == true marks the device as requiring initialization, so the next
// device_lazy_init call for it will run again; value == false marks it
// as already initialized.
void set_requires_device_init(at::DeviceType device_type, bool value) {
  const auto device_index = static_cast<int>(device_type);
  is_initialized[device_index] = !value;
}
} // namespace torch::utils