Add check for cuda lazy init (#80912)

Validate that no CUDA calls are made during the `import torch` call, by
importing torch with visible devices limited to a non-existent device

Should prevent regressions like the ones reported in https://github.com/pytorch/pytorch/issues/80876
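
For reference, the check reduces to the following standalone sketch (a minimal illustration mirroring the test added below; the device index `32` is just an arbitrary out-of-range value and the assertion message is illustrative, not part of the test):

```python
# Minimal sketch of the lazy-init check, mirroring the test below.
# CUDA_VISIBLE_DEVICES is set to a non-existent device *after* `import torch`:
# if the import made no CUDA calls, the restriction is still honored when the
# CUDA runtime is first queried, so torch.cuda.device_count() reports 0.
import subprocess
import sys

script = (
    "import os; import torch;"
    "os.environ['CUDA_VISIBLE_DEVICES']='32';"
    "print(torch.cuda.device_count())"
)
out = subprocess.check_output([sys.executable, "-c", script]).decode("ascii").strip()
assert out == "0", f"CUDA was initialized during `import torch` (device_count={out})"
```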

Pull Request resolved: https://github.com/pytorch/pytorch/pull/80912
Approved by: https://github.com/ngimel, https://github.com/atalman
Nikita Shulga 2022-07-06 01:39:24 +00:00 committed by PyTorch MergeBot
parent 04c50fec1c
commit 1ad7ef3f21


@@ -3951,6 +3951,15 @@ torch.cuda.synchronize()
            loss.backward()
            optimizer.step()

    @unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support CUDA_VISIBLE_DEVICES")
    @unittest.skipIf(TEST_MULTIGPU, "Testing on one GPU is sufficient")
    def test_lazy_init(self):
        """ Validate that no CUDA calls are made during `import torch` call"""
        from subprocess import check_output
        test_script = "import os; import torch;os.environ['CUDA_VISIBLE_DEVICES']='32';print(torch.cuda.device_count())"
        rc = check_output([sys.executable, '-c', test_script]).decode("ascii").strip()
        self.assertEqual(rc, "0")


class TestCudaComm(TestCase):
    def _test_broadcast(self, input):