From 0ca2a79f5b80c1ec8be95e6f7de4182dd90f3502 Mon Sep 17 00:00:00 2001
From: Aidyn-A
Date: Tue, 10 Jun 2025 19:59:06 +0000
Subject: [PATCH] [TEST] Modernize test_sort_large (#155546)

Since its introduction ~4 years ago, the test `test_sort_large` has always
been deselected because it requires 200GB of CUDA memory. Now that GPUs of
this size are available, the test gets selected but fails because `var_mean`
is not a member of `torch.Tensor` and `torch.var_mean` accepts only floating
point tensors.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/155546
Approved by: https://github.com/eqy
---
 test/test_sort_and_select.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/test/test_sort_and_select.py b/test/test_sort_and_select.py
index daa39964374..360dc058212 100644
--- a/test/test_sort_and_select.py
+++ b/test/test_sort_and_select.py
@@ -222,14 +222,14 @@ class TestSortAndSelect(TestCase):
         t = t0.view(1, 8192).expand(2**18 + 1, -1).contiguous()
         v, i = t.sort()
         del t
-        iv, im = i.var_mean(dim=0)
+        iv, im = torch.var_mean(i.to(dtype), dim=0)
         del i
-        vv, vm = v.var_mean(dim=0)
+        vv, vm = torch.var_mean(v.to(dtype), dim=0)
         del v
         self.assertEqual(vv, torch.zeros_like(vv))
         self.assertEqual(iv, torch.zeros_like(iv))
-        self.assertEqual(vm, torch.arange(255, dtype=dtype, device=device))
-        self.assertEqual(im, t0.sort().indices)
+        self.assertEqual(vm, torch.arange(8192, dtype=dtype, device=device))
+        self.assertEqual(im, t0.sort().indices, exact_dtype=False)
 
     @dtypes(torch.float32)
     def test_sort_restride(self, device, dtype):
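
Not part of the patch: a minimal sketch of why the cast is needed before calling `var_mean`. The tensor shape and names below are illustrative only, not the 200GB test tensor from the diff.

```python
import torch

# sort() returns int64 indices, like `i` in the test above
# (small illustrative tensor, not the real 200GB test input).
i = torch.randint(0, 8192, (4, 8))

# torch.Tensor has no var_mean method, and torch.var_mean supports only
# floating point (and complex) inputs, so the integer indices must be
# cast first, which is what the patch does:
#   i.var_mean(dim=0)         -> AttributeError
#   torch.var_mean(i, dim=0)  -> RuntimeError on integer dtypes
iv, im = torch.var_mean(i.to(torch.float32), dim=0)

print(iv.shape, im.shape)  # torch.Size([8]) torch.Size([8])
```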