Let's have some fun.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/78828
Approved by: https://github.com/ezyang
#include <torch/csrc/utils/cuda_lazy_init.h>

#include <torch/csrc/python_headers.h>

#include <mutex>

#include <torch/csrc/Exceptions.h>
#include <torch/csrc/utils/object_ptr.h>

namespace torch {
namespace utils {

// Tracks whether torch.cuda._lazy_init() has already been run.
static bool run_yet = false;

void cuda_lazy_init() {
  pybind11::gil_scoped_acquire g;
  // Protected by the GIL. We don't use call_once because under ASAN it
  // has a buggy implementation that deadlocks if an instance throws an
  // exception. In any case, call_once isn't necessary, because we
  // have taken a lock.
  if (!run_yet) {
    auto module = THPObjectPtr(PyImport_ImportModule("torch.cuda"));
    if (!module)
      throw python_error();
    auto res =
        THPObjectPtr(PyObject_CallMethod(module.get(), "_lazy_init", ""));
    if (!res)
      throw python_error();
    run_yet = true;
  }
}

// Resets the flag so the next cuda_lazy_init() call re-runs
// torch.cuda._lazy_init().
void set_run_yet_variable_to_false() {
  run_yet = false;
}

} // namespace utils
} // namespace torch
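A minimal usage sketch, not part of this file: my_cuda_entry_point below is a hypothetical caller, showing how C++ code that is about to touch CUDA would invoke cuda_lazy_init() first so that torch.cuda._lazy_init() has completed on the Python side before any device work happens.

#include <ATen/ATen.h>
#include <torch/csrc/utils/cuda_lazy_init.h>

// Hypothetical caller, for illustration only. Assumes the Python
// interpreter is initialized; cuda_lazy_init() acquires the GIL itself.
at::Tensor my_cuda_entry_point(const at::Tensor& cpu_tensor) {
  // Make sure torch.cuda._lazy_init() has run before the first CUDA call.
  torch::utils::cuda_lazy_init();
  // Now it is safe to allocate on the GPU.
  return cpu_tensor.to(at::kCUDA);
}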