pytorch/torch/csrc/utils/cuda_lazy_init.cpp
Guanheng Zhang aa660b8eb7 Re-land "Fix error message for a wrong fork CUDA" (#23209)
Summary:
Re-land https://github.com/pytorch/pytorch/pull/23030
Pull Request resolved: https://github.com/pytorch/pytorch/pull/23209

Differential Revision: D16440000

Pulled By: zhangguanheng66

fbshipit-source-id: e05683275522835a33d5a7e6d76b7e94774e4d98
2019-07-24 07:01:04 -07:00

34 lines
858 B
C++

#include <torch/csrc/utils/cuda_lazy_init.h>
#include <torch/csrc/python_headers.h>
#include <mutex>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/utils/object_ptr.h>
namespace torch {
namespace utils {
// Tracks whether torch.cuda._lazy_init has already completed successfully
// in this process. Reads/writes are protected by the GIL (see
// cuda_lazy_init below), so no additional synchronization is used.
static bool run_yet = false;
void cuda_lazy_init() {
AutoGIL g;
// Protected by the GIL. We don't use call_once because under ASAN it
// has a buggy implementation that deadlocks if an instance throws an
// exception. In any case, call_once isn't necessary, because we
// have taken a lock.
if (!run_yet) {
auto module = THPObjectPtr(PyImport_ImportModule("torch.cuda"));
if (!module) throw python_error();
auto res = THPObjectPtr(PyObject_CallMethod(module.get(), "_lazy_init", ""));
if (!res) throw python_error();
run_yet = true;
}
}
// Resets the one-shot initialization flag so the next cuda_lazy_init()
// call re-runs torch.cuda._lazy_init. NOTE(review): presumably invoked in
// a child process after fork, where the parent's CUDA state is invalid —
// confirm against callers. Caller is expected to hold the GIL, since
// run_yet is otherwise GIL-protected.
void set_run_yet_variable_to_false() {
run_yet = false;
}
}
}