don't set CUDA_MODULE_LOADING (#158712)

If needed, it'll be set in `_C._cuda_init()`. setenv is not thread-safe, so this can cause segfaults due to getenv/setenv races.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/158712
Approved by: https://github.com/eqy
This commit is contained in:
Natalia Gimelshein 2025-07-20 01:36:23 +00:00 committed by PyTorch MergeBot
parent b4abf41425
commit 4869f71170
2 changed files with 0 additions and 7 deletions

View File

@@ -6484,11 +6484,6 @@ class TestCudaAutocast(TestAutocast):
with torch.cuda.amp.autocast():
_ = torch.ones(10)
def test_cuda_module_loading_env(self):
torch.cuda.init()
val = os.environ.get("CUDA_MODULE_LOADING", "")
self.assertEqual(val, "LAZY")
@unittest.skipIf(
os.environ.get("USE_LEGACY_DRIVER", None) == "1", "Doesn't work with older driver"

View File

@@ -379,8 +379,6 @@ def _lazy_init():
)
# This function throws if there's a driver initialization error, no GPUs
# are found or any other error occurs
if "CUDA_MODULE_LOADING" not in os.environ:
os.environ["CUDA_MODULE_LOADING"] = "LAZY"
torch._C._cuda_init()
# Some of the queued calls may reentrantly call _lazy_init();
# we need to just return without initializing in that case.