diff --git a/test/test_meta.py b/test/test_meta.py
index 29fc07a9eee..fef1d0df506 100644
--- a/test/test_meta.py
+++ b/test/test_meta.py
@@ -17,6 +17,7 @@ from torch.testing._internal.common_utils import (
     skipIfTorchDynamo,
     suppress_warnings,
     TEST_WITH_ASAN,
+    TEST_WITH_TORCHDYNAMO,
     run_tests,
     dtype_abbrs,
     parametrize
@@ -1105,6 +1106,13 @@ class TestMeta(TestCase):
     @suppress_warnings
     @ops(op_db)
     def test_meta_outplace(self, device, dtype, op):
+        skip_op_names = (
+            "fft.ihfft",
+            "fft.ihfft2",
+            "linalg.lu_solve",
+        )
+        if TEST_WITH_TORCHDYNAMO and op.name in skip_op_names:
+            raise unittest.SkipTest("flaky")
         # run the OpInfo sample inputs, cross-referencing them with the
         # meta implementation and check the results are the same. All
         # the heavy lifting happens in MetaCrossRefFunctionMode
diff --git a/test/test_nestedtensor.py b/test/test_nestedtensor.py
index b9ee51ed41f..f82523a514b 100644
--- a/test/test_nestedtensor.py
+++ b/test/test_nestedtensor.py
@@ -26,6 +26,7 @@ from torch.testing._internal.common_utils import (
     parametrize,
     run_tests,
     skipIfSlowGradcheckEnv,
+    skipIfTorchDynamo,
     subtest,
     TestCase,
 )
@@ -934,6 +935,7 @@ class TestNestedTensorDeviceType(TestCase):
         is_cuda = 'cuda' in str(device)
         self.assertEqual(nt.is_cuda, is_cuda)
 
+    @skipIfTorchDynamo("flaky")
     @dtypes(torch.float, torch.float16, torch.double)
     def test_nested_tensor_indexing(self, device, dtype):
         # edge case: empty nested tensor
diff --git a/test/test_ops.py b/test/test_ops.py
index f69d0a369c5..96b5616aa30 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -34,6 +34,7 @@ from torch.testing._internal.common_utils import (
     suppress_warnings,
     noncontiguous_like,
     TEST_WITH_ASAN,
+    TEST_WITH_TORCHDYNAMO,
     TEST_WITH_UBSAN,
     IS_WINDOWS,
     IS_FBCODE,
@@ -651,6 +652,8 @@ class TestCommon(TestCase):
     # - out= with the correct dtype and device, but the wrong shape
     @ops(ops_and_refs, dtypes=OpDTypes.none)
     def test_out_warning(self, device, op):
+        if TEST_WITH_TORCHDYNAMO and op.name == "_refs.clamp":
+            self.skipTest("flaky")
         # Prefers running in float32 but has a fallback for the first listed supported dtype
         supported_dtypes = op.supported_dtypes(self.device_type)
         if len(supported_dtypes) == 0: