don't set CUDA_MODULE_LOADING (#158712)
If needed, it will be set in `_C._cuda_init()` instead. `setenv` is not thread-safe, so setting it from Python can cause segfaults due to `getenv`/`setenv` races.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/158712
Approved by: https://github.com/eqy
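For context, a minimal sketch (not from the PR) of the race the commit message describes, assuming a glibc-style libc where getenv walks the environ array without locking while setenv/putenv may reallocate it. The native reader thread stands in for code such as the CUDA runtime reading CUDA_MODULE_LOADING during initialization; the variable names and iteration counts are illustrative:

import ctypes
import os
import threading

libc = ctypes.CDLL(None)  # POSIX: resolve symbols from the running process
libc.getenv.restype = ctypes.c_void_p  # avoid truncating the returned pointer

def native_reader():
    # Stands in for native code (e.g. the CUDA runtime during cuInit)
    # calling getenv("CUDA_MODULE_LOADING") concurrently.
    for _ in range(1_000_000):
        libc.getenv(b"CUDA_MODULE_LOADING")

def python_writer():
    # Stands in for the removed os.environ["CUDA_MODULE_LOADING"] = "LAZY":
    # os.environ.__setitem__ reaches the C-level putenv/setenv, which may
    # reallocate the environ array out from under a concurrent getenv.
    for i in range(1_000_000):
        os.environ[f"VAR_{i % 64}"] = "x"

t = threading.Thread(target=native_reader)
t.start()
python_writer()
t.join()  # may crash on glibc; exact behavior is platform-dependent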
This commit is contained in:
parent b4abf41425
commit 4869f71170
test/test_cuda.py
@@ -6484,11 +6484,6 @@ class TestCudaAutocast(TestAutocast):
         with torch.cuda.amp.autocast():
             _ = torch.ones(10)
 
-    def test_cuda_module_loading_env(self):
-        torch.cuda.init()
-        val = os.environ.get("CUDA_MODULE_LOADING", "")
-        self.assertEqual(val, "LAZY")
-
 
     @unittest.skipIf(
         os.environ.get("USE_LEGACY_DRIVER", None) == "1", "Doesn't work with older driver"
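The removed test asserted that Python-level initialization had mutated os.environ; with the assignment now happening inside `_C._cuda_init()`, that assertion no longer holds. If one still wanted to observe the effective loading mode, a hedged sketch along these lines could query the CUDA runtime directly; the library name and the CUDA 11.7+ cudaModuleGetLoadingMode entry point are assumptions here, so treat this as illustrative rather than a drop-in replacement test:

import ctypes

import torch

torch.cuda.init()  # triggers _C._cuda_init(), which now handles the mode itself

# Assumed library name; on Linux only a versioned soname may be installed.
cudart = ctypes.CDLL("libcudart.so")
mode = ctypes.c_int(0)
err = cudart.cudaModuleGetLoadingMode(ctypes.byref(mode))
assert err == 0, f"cudaModuleGetLoadingMode returned error {err}"
print("cudaModuleLoadingMode enum value:", mode.value)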
torch/cuda/__init__.py
@@ -379,8 +379,6 @@ def _lazy_init():
         )
         # This function throws if there's a driver initialization error, no GPUs
         # are found or any other error occurs
-        if "CUDA_MODULE_LOADING" not in os.environ:
-            os.environ["CUDA_MODULE_LOADING"] = "LAZY"
         torch._C._cuda_init()
         # Some of the queued calls may reentrantly call _lazy_init();
         # we need to just return without initializing in that case.
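After this change, code that wants a specific loading mode can still set the variable itself, provided it does so before CUDA initialization and before any threads exist. A sketch:

import os

# Must happen before CUDA initialization and before any threads start,
# which avoids the getenv/setenv race that motivated this commit.
os.environ["CUDA_MODULE_LOADING"] = "EAGER"  # or "LAZY"; read at cuInit time

import torch  # noqa: E402

torch.cuda.init()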