From 7eda06b36674afa117b28ad807c3421c94e775c1 Mon Sep 17 00:00:00 2001 From: Yidi Wu Date: Thu, 5 Dec 2024 16:47:13 -0800 Subject: [PATCH] skip test dynamo for aot_dispatch tests on ci (#142185) A lot of tests in test_aotdispatch.py are not meaningful (from user's perspective) when we run with dynamo. So we skip them. Pull Request resolved: https://github.com/pytorch/pytorch/pull/142185 Approved by: https://github.com/zou3519 ghstack dependencies: #141610 --- .ci/pytorch/test.sh | 1 + test/functorch/test_aotdispatch.py | 12 ------------ test/run_test.py | 11 +++++++++++ 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh index f90344ba430..fda7d036358 100755 --- a/.ci/pytorch/test.sh +++ b/.ci/pytorch/test.sh @@ -313,6 +313,7 @@ test_dynamo_wrapped_shard() { --exclude-jit-executor \ --exclude-distributed-tests \ --exclude-torch-export-tests \ + --exclude-aot-dispatch-tests \ --shard "$1" "$NUM_TEST_SHARDS" \ --verbose \ --upload-artifacts-while-running diff --git a/test/functorch/test_aotdispatch.py b/test/functorch/test_aotdispatch.py index 77d70a9816e..d3f8772b2a2 100644 --- a/test/functorch/test_aotdispatch.py +++ b/test/functorch/test_aotdispatch.py @@ -72,7 +72,6 @@ from torch.testing._internal.common_utils import ( parametrize, run_tests, skipIfRocm, - skipIfTorchDynamo, TestCase, xfail_inherited_tests, xfailIfS390X, @@ -787,7 +786,6 @@ def forward(self, primals_1): self.assertEqual(x_ref.grad, x_test.grad) self.assertEqual(x_ref_view.grad, x_test_view.grad) - @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127470") def test_nested_subclasses(self): @torch.compile(backend="aot_eager") def f(x): @@ -814,7 +812,6 @@ def forward(self, primals_1): self.assertTrue(isinstance(aaaa.grad.a, TwoTensor)) self.assertTrue(isinstance(aaaa.grad.b, TwoTensor)) - @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127470") def test_nested_subclasses_non_nested_grad(self):
@torch.compile(backend="aot_eager") def f(x): @@ -841,7 +838,6 @@ metadata incorrectly. new_out.sum().backward() @unittest.skipIf(IS_WINDOWS, "Windows isn't supported for this case") - @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127470") def test_custom_tensor_metadata(self): def f(x): x_elem = x.elem @@ -871,7 +867,6 @@ metadata incorrectly. isinstance(custom_aa_compile.grad.elem, ConstantExtraMetadataTensor) ) - @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127470") def test_nested_subclasses_complicated_inps(self): def f(x, y, z): temp = x + y @@ -923,7 +918,6 @@ metadata incorrectly. self.assertTrue(torch.allclose(y_nested_compile.grad.a.b, y_nested.grad.a.b)) @unittest.skipIf(IS_WINDOWS, "Windows isn't supported for this case") - @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127470") def test_nested_subclasses_complicated_inps_mixed(self): def f(x, y): y_elem = y.elem @@ -960,7 +954,6 @@ metadata incorrectly. self.assertTrue(torch.allclose(x_nested_compile.grad, x_nested.grad)) self.assertTrue(torch.allclose(custom_aa_compile.grad, custom_aa.grad)) - @skipIfTorchDynamo("This test suite already uses dynamo") def test_composite_impl_compile(self): class Foo(torch.nn.Module): def __init__(self) -> None: @@ -2466,7 +2459,6 @@ def forward(self, primals_1, primals_2): # Not checking equality of ref and x as Exception is expected # Partially addresses https://github.com/pytorch/pytorch/issues/106457 - @skipIfTorchDynamo() def test_input_mutation_false_aliasing(self): def f(a, b): a.mul_(3) @@ -3898,7 +3890,6 @@ def forward(self, tangents_1): str(out2.grad_fn.__class__), """""" ) - @skipIfTorchDynamo() @patch("torch._dynamo.config.assume_static_by_default", False) def test_dynamic_output_aliases_input_view_meta_replay(self): # - torch.compile: using it so we can have a SymInt in the FX graph. 
@@ -5409,7 +5400,6 @@ def forward(self, tangents_1, tangents_2): self.assertEqual(out_ref.a, out_test.a) self.assertEqual(out_ref.b, out_test.b) - @skipIfTorchDynamo() def test_aot_dispatch_incorrect_backward(self): # a is a subclass, b is not def f(a, b): @@ -6197,7 +6187,6 @@ class TestAOTModuleSimplified(AOTTestCase): out_buffer = out.values() ga, gb, gc = torch.autograd.grad(out_buffer.sum(), (a, b, c)) - @skipIfTorchDynamo() def test_wrong_guess_tangent_type(self): def fn(x): return x.clone() @@ -6696,7 +6685,6 @@ instantiate_device_type_tests(TestEagerFusionModuleInfo, globals(), only_for=onl "test_subclass_metadata_mutation_req_grad_False", ] ) -@skipIfTorchDynamo("This test suite already uses dynamo") class TestAOTAutogradWithDynamo(TestAOTAutograd): """ These are the same as TestAOTAutograd tests, but we run dynamo first to get a graph module. diff --git a/test/run_test.py b/test/run_test.py index fc98d900e91..cf1311f2516 100755 --- a/test/run_test.py +++ b/test/run_test.py @@ -662,6 +662,9 @@ JIT_EXECUTOR_TESTS = [ INDUCTOR_TESTS = [test for test in TESTS if test.startswith(INDUCTOR_TEST_PREFIX)] DISTRIBUTED_TESTS = [test for test in TESTS if test.startswith(DISTRIBUTED_TEST_PREFIX)] TORCH_EXPORT_TESTS = [test for test in TESTS if test.startswith("export")] +AOT_DISPATCH_TESTS = [ + test for test in TESTS if test.startswith("functorch/test_aotdispatch") +] FUNCTORCH_TESTS = [test for test in TESTS if test.startswith("functorch")] ONNX_TESTS = [test for test in TESTS if test.startswith("onnx")] CPP_TESTS = [test for test in TESTS if test.startswith(CPP_TEST_PREFIX)] @@ -1632,6 +1635,11 @@ def parse_args(): action="store_true", help="exclude torch export tests", ) + parser.add_argument( + "--exclude-aot-dispatch-tests", + action="store_true", + help="exclude aot dispatch tests", + ) parser.add_argument( "--exclude-distributed-tests", action="store_true", @@ -1798,6 +1806,9 @@ def get_selected_tests(options) -> List[str]: if 
options.exclude_torch_export_tests: options.exclude.extend(TORCH_EXPORT_TESTS) + if options.exclude_aot_dispatch_tests: + options.exclude.extend(AOT_DISPATCH_TESTS) + # these tests failing in CUDA 11.6 temporary disabling. issue https://github.com/pytorch/pytorch/issues/75375 if torch.version.cuda is not None: options.exclude.extend(["distributions/test_constraints"])