#include <torch/csrc/utils/cuda_lazy_init.h>

#include <torch/csrc/python_headers.h>
#include <mutex>

#include <torch/csrc/Exceptions.h>
#include <torch/csrc/utils/object_ptr.h>

namespace torch {
namespace utils {

void cuda_lazy_init() {
  AutoGIL g;
  // Protected by the GIL. We don't use call_once because under ASAN it
  // has a buggy implementation that deadlocks if an instance throws an
  // exception. In any case, call_once isn't necessary, because we
  // have taken a lock.
  static bool run_yet = false;
  if (!run_yet) {
    auto module = THPObjectPtr(PyImport_ImportModule("torch.cuda"));
    if (!module) throw python_error();
    auto res = THPObjectPtr(PyObject_CallMethod(module.get(), "_lazy_init", ""));
    if (!res) throw python_error();
    run_yet = true;
  }
}

} // namespace utils
} // namespace torch
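
A minimal usage sketch for context, assuming a C++ binding that needs torch.cuda initialized before it touches the CUDA runtime; the function name hypothetical_device_count and the cuda_runtime.h call are illustrative assumptions, not part of this file or of PyTorch:

// Hedged sketch, not part of cuda_lazy_init.cpp: a hypothetical binding that
// calls cuda_lazy_init() so torch.cuda's _lazy_init() has run before the
// CUDA runtime is queried. Assumes the Python interpreter is already
// initialized, since cuda_lazy_init() acquires the GIL.
#include <torch/csrc/utils/cuda_lazy_init.h>

#include <cuda_runtime.h>

int hypothetical_device_count() {
  torch::utils::cuda_lazy_init();  // idempotent: only the first call imports torch.cuda
  int count = 0;
  cudaGetDeviceCount(&count);      // safe to query the CUDA runtime after lazy init
  return count;
}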