pytorch/torch/csrc/utils/cuda_lazy_init.cpp
Guanheng Zhang a6e45a69a8 Fix error message for a wrong fork CUDA (#23030)
Summary:
Fix https://github.com/pytorch/pytorch/issues/17357
Unblock 1.2 release.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/23030

Differential Revision: D16368004

Pulled By: zhangguanheng66

fbshipit-source-id: 44b6977790ce768efa4777bae41d4b26dae5f288
2019-07-22 15:04:32 -07:00

34 lines
858 B
C++

#include <torch/csrc/utils/cuda_lazy_init.h>
#include <torch/csrc/python_headers.h>
#include <mutex>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/utils/object_ptr.h>
namespace torch {
namespace utils {
// Tracks whether torch.cuda._lazy_init has already been invoked in this
// process. Reads/writes happen only while holding the GIL (see
// cuda_lazy_init below), so no additional synchronization is needed.
// Reset via set_run_yet_variable_to_false — presumably after fork, so the
// child re-initializes CUDA (see the fork fix this file was changed for).
static bool run_yet = false;
// Lazily initializes CUDA by calling torch.cuda._lazy_init() exactly once
// per process (until the flag is reset). Throws python_error if the import
// or the Python-side call fails.
void cuda_lazy_init() {
  AutoGIL g;
  // run_yet is protected by the GIL. We don't use std::call_once because
  // under ASAN it has a buggy implementation that deadlocks if an instance
  // throws an exception; and since we already hold a lock (the GIL), it is
  // unnecessary anyway.
  if (run_yet) {
    return;
  }
  auto torch_cuda = THPObjectPtr(PyImport_ImportModule("torch.cuda"));
  if (!torch_cuda) {
    throw python_error();
  }
  auto result =
      THPObjectPtr(PyObject_CallMethod(torch_cuda.get(), "_lazy_init", ""));
  if (!result) {
    throw python_error();
  }
  run_yet = true;
}
// Resets the lazy-init flag so the next cuda_lazy_init() call re-runs
// torch.cuda._lazy_init(). NOTE(review): caller is expected to hold the
// GIL, since run_yet is otherwise only touched under it — confirm at the
// call site (presumably the post-fork handler, per the commit title).
void set_run_yet_variable_to_false() {
run_yet = false;
}
}
}