pytorch/torch/csrc/utils/cuda_lazy_init.cpp
Edward Yang 1111a6b810 Use pybind11::gil_scoped_* functions instead of AutoGIL/AutoNoGIL (#30274)
Summary:
Reland of https://github.com/pytorch/pytorch/pull/29095
Pull Request resolved: https://github.com/pytorch/pytorch/pull/30274

Differential Revision: D18762293

Pulled By: ezyang

fbshipit-source-id: d3d50c2dd12bcb678ab25fa708eb6587cc4b66f9
2019-12-02 12:19:58 -08:00
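
The subject line above describes swapping PyTorch's old AutoGIL/AutoNoGIL RAII guards for pybind11's equivalents. A minimal sketch of that substitution, assuming illustrative function names that are not part of this file:

// Illustrative sketch only; the function names below are hypothetical.
#include <pybind11/pybind11.h>

void work_needing_gil() {
  // Replaces the old AutoGIL guard: holds the Python GIL for this scope.
  pybind11::gil_scoped_acquire gil;
  // ... safe to call CPython / pybind11 APIs here ...
}

void work_releasing_gil() {
  // Replaces the old AutoNoGIL guard: drops the GIL around blocking C++ work.
  pybind11::gil_scoped_release no_gil;
  // ... long-running work that must not touch Python objects ...
}

The file below uses the first of these guards, pybind11::gil_scoped_acquire.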


#include <torch/csrc/utils/cuda_lazy_init.h>
#include <torch/csrc/python_headers.h>
#include <mutex>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/utils/object_ptr.h>

namespace torch {
namespace utils {

static bool run_yet = false;

void cuda_lazy_init() {
  pybind11::gil_scoped_acquire g;
  // Protected by the GIL. We don't use call_once because under ASAN it
  // has a buggy implementation that deadlocks if an instance throws an
  // exception. In any case, call_once isn't necessary, because we
  // have taken a lock.
  if (!run_yet) {
    auto module = THPObjectPtr(PyImport_ImportModule("torch.cuda"));
    if (!module) throw python_error();
    auto res = THPObjectPtr(PyObject_CallMethod(module.get(), "_lazy_init", ""));
    if (!res) throw python_error();
    run_yet = true;
  }
}

void set_run_yet_variable_to_false() {
  run_yet = false;
}

} // namespace utils
} // namespace torch
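
For context, a sketch of how a caller might use this helper; the surrounding function is hypothetical, and only torch::utils::cuda_lazy_init() comes from the file above:

#include <torch/csrc/utils/cuda_lazy_init.h>

// Hypothetical binding-side caller: make sure torch.cuda has been
// initialized before touching CUDA state from C++.
void setup_cuda_binding() {
  // The first call imports torch.cuda and runs torch.cuda._lazy_init();
  // later calls are a cheap check of the run_yet flag under the GIL.
  torch::utils::cuda_lazy_init();
  // ... CUDA-dependent setup would follow here ...
}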