From a7c596870d92f15cf723f53139bf1a63ac6da4a2 Mon Sep 17 00:00:00 2001
From: Xuehai Pan
Date: Wed, 5 Jun 2024 21:53:49 +0000
Subject: [PATCH] [BE][Eazy] remove `torch.torch.xxx` usages (#127800)

NB: `torch` is exposed in `torch/__init__.py`. So there can be
`torch.torch.torch.xxx`.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/127800
Approved by: https://github.com/peterbell10, https://github.com/kit1980, https://github.com/malfet
---
 test/dynamo/test_ctx_manager.py | 4 ++--
 test/test_cuda.py               | 4 ++--
 torch/_dynamo/convert_frame.py  | 4 +---
 torch/_inductor/lowering.py     | 2 +-
 torch/nn/modules/pooling.py     | 6 +++---
 5 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/test/dynamo/test_ctx_manager.py b/test/dynamo/test_ctx_manager.py
index 651c392f5dd..47f8e8eeb86 100644
--- a/test/dynamo/test_ctx_manager.py
+++ b/test/dynamo/test_ctx_manager.py
@@ -497,7 +497,7 @@ class CtxManagerTests(torch._dynamo.test_case.TestCase):
             a_float32 = torch.rand((8, 8), device="cuda")
             b_float32 = torch.rand((8, 8), device="cuda")
 
-            with torch.cuda.amp.autocast(dtype=torch.torch.float64):
+            with torch.cuda.amp.autocast(dtype=torch.float64):
                 c_float64 = torch.mm(a_float32, b_float32)
             return c_float64
 
@@ -796,7 +796,7 @@ class CtxManagerTests(torch._dynamo.test_case.TestCase):
         self.assertEqual(exported.dtype, real_dtype)
 
         self.assertEqual(exported.device.index, 0)
-        self.assertEqual(exported.dtype, torch.torch.float16)
+        self.assertEqual(exported.dtype, torch.float16)
 
     @unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
     def test_autocast_arguments_binding(self):
diff --git a/test/test_cuda.py b/test/test_cuda.py
index 785f0499df0..7ec86bd6f47 100644
--- a/test/test_cuda.py
+++ b/test/test_cuda.py
@@ -380,10 +380,10 @@ class TestCuda(TestCase):
 
         def check_workspace_size(inp):
             torch._C._cuda_clearCublasWorkspaces()
-            start = torch.torch.cuda.memory_stats()["active_bytes.all.allocated"]
+            start = torch.cuda.memory_stats()["active_bytes.all.allocated"]
             with torch.no_grad():
                 torch.matmul(inp, inp)
-            finish = torch.torch.cuda.memory_stats()["active_bytes.all.allocated"]
+            finish = torch.cuda.memory_stats()["active_bytes.all.allocated"]
             return finish - start
 
         # check default
diff --git a/torch/_dynamo/convert_frame.py b/torch/_dynamo/convert_frame.py
index 37ff5a8a299..88fb2a85bca 100644
--- a/torch/_dynamo/convert_frame.py
+++ b/torch/_dynamo/convert_frame.py
@@ -178,9 +178,7 @@ def preserve_global_state(fn):
         finally:
             cleanup.close()
             torch._C._set_grad_enabled(prior_grad_mode)
-            torch.torch.autograd.grad_mode._enter_inference_mode(
-                prior_inference_mode
-            )
+            torch.autograd.grad_mode._enter_inference_mode(prior_inference_mode)
             torch.use_deterministic_algorithms(
                 prior_deterministic, warn_only=prior_warn_only
             )
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index 20b0082eb1d..0a1909890e6 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -2421,7 +2421,7 @@ def slice_scatter(x, src, dim=0, start=None, end=None, step=1):
                     ops.index_expr(
                         ModularIndexing(idx[dim] - start, 1, step), torch.int64
                     ),
-                    ops.constant(0, torch.torch.int64),
+                    ops.constant(0, torch.int64),
                 )
             )
         assert mask
diff --git a/torch/nn/modules/pooling.py b/torch/nn/modules/pooling.py
index 3f02bb63a84..61ce5639098 100644
--- a/torch/nn/modules/pooling.py
+++ b/torch/nn/modules/pooling.py
@@ -381,9 +381,9 @@ class MaxUnpool2d(_MaxUnpoolNd):
                   [  0.,  14.,   0.,  16.]]]])
         >>> # Now using output_size to resolve an ambiguous size for the inverse
         >>> input = torch.tensor([[[[ 1.,  2.,  3.,  4.,  5.],
-                                          [ 6.,  7.,  8.,  9., 10.],
-                                          [11., 12., 13., 14., 15.],
-                                          [16., 17., 18., 19., 20.]]]])
+                                    [ 6.,  7.,  8.,  9., 10.],
+                                    [11., 12., 13., 14., 15.],
+                                    [16., 17., 18., 19., 20.]]]])
         >>> output, indices = pool(input)
         >>> # This call will not work without specifying output_size
         >>> unpool(output, indices, output_size=input.size())
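
As the commit note ("NB") points out, `torch/__init__.py` ends up exposing `torch` as an
attribute of itself, which is why the removed `torch.torch.xxx` spellings resolved at all
and why chains of any depth work. A minimal sketch of that behavior, assuming only an
installed copy of PyTorch; this snippet is illustration and not part of the patch:

    # The `torch` package is reachable as an attribute of itself, so
    # `torch.torch...` chains of any depth resolve to the same objects.
    import torch

    assert torch.torch is torch                         # module refers to itself
    assert torch.torch.torch.float64 is torch.float64   # arbitrarily deep chain
    assert torch.torch.cuda is torch.cuda               # same submodule object

The spellings were therefore harmless at runtime, just redundant, which is why this patch
is a pure cleanup with no behavior change.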