skip test dynamo for aot_dispatch tests on ci (#142185)

A lot of tests in test_aotdispatch.py are not meaningful (from the user's perspective) when we run them with dynamo, so we skip them.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/142185
Approved by: https://github.com/zou3519
ghstack dependencies: #141610
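For context, below is a minimal sketch (not taken from the diff; the class name and test body are hypothetical) of the per-test skipIfTorchDynamo pattern that this change makes redundant: once functorch/test_aotdispatch is excluded from the dynamo-wrapped shard as a whole, the individual decorators can be dropped.

```python
# Hypothetical illustration: under the dynamo-wrapped CI shard, tests that
# already invoke torch.compile themselves were previously opted out one by
# one with skipIfTorchDynamo. Excluding the whole file from that shard makes
# these per-test skips unnecessary.
import torch
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfTorchDynamo


class ExampleAOTDispatchTest(TestCase):
    @skipIfTorchDynamo("This test suite already uses dynamo")  # the kind of decorator removed here
    def test_compile_aot_eager(self):
        @torch.compile(backend="aot_eager")
        def f(x):
            return x.sin().cos()

        x = torch.randn(4, requires_grad=True)
        self.assertEqual(f(x), x.sin().cos())


if __name__ == "__main__":
    run_tests()
```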
Author: Yidi Wu
Date: 2024-12-05 16:47:13 -08:00
Committed by: PyTorch MergeBot
Parent: b838bdd4d4
Commit: 7eda06b366

3 changed files with 12 additions and 12 deletions


@@ -313,6 +313,7 @@ test_dynamo_wrapped_shard() {
     --exclude-jit-executor \
     --exclude-distributed-tests \
     --exclude-torch-export-tests \
+    --exclude-aot-dispatch-tests \
     --shard "$1" "$NUM_TEST_SHARDS" \
     --verbose \
     --upload-artifacts-while-running
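The hunk above adds the new exclusion flag to the dynamo-wrapped CI shard. As an illustration only (the real invocation is built in the CI shell scripts, with additional sharding and dynamo options not shown here), the shard ends up calling test/run_test.py roughly like this:

```python
# Illustrative sketch: hand the flags from the hunk above to test/run_test.py.
# The subprocess wrapper is hypothetical; CI does this from shell, not Python.
import subprocess
import sys

cmd = [
    sys.executable,
    "test/run_test.py",
    "--exclude-jit-executor",
    "--exclude-distributed-tests",
    "--exclude-torch-export-tests",
    "--exclude-aot-dispatch-tests",  # new flag added by this PR
    "--verbose",
]
subprocess.run(cmd, check=True)
```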


@@ -72,7 +72,6 @@ from torch.testing._internal.common_utils import (
     parametrize,
     run_tests,
     skipIfRocm,
-    skipIfTorchDynamo,
     TestCase,
     xfail_inherited_tests,
     xfailIfS390X,
@@ -787,7 +786,6 @@ def forward(self, primals_1):
         self.assertEqual(x_ref.grad, x_test.grad)
         self.assertEqual(x_ref_view.grad, x_test_view.grad)
-    @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127470")
     def test_nested_subclasses(self):
         @torch.compile(backend="aot_eager")
         def f(x):
@@ -814,7 +812,6 @@ def forward(self, primals_1):
         self.assertTrue(isinstance(aaaa.grad.a, TwoTensor))
         self.assertTrue(isinstance(aaaa.grad.b, TwoTensor))
-    @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127470")
     def test_nested_subclasses_non_nested_grad(self):
         @torch.compile(backend="aot_eager")
         def f(x):
@@ -841,7 +838,6 @@ metadata incorrectly.
         new_out.sum().backward()
     @unittest.skipIf(IS_WINDOWS, "Windows isn't supported for this case")
-    @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127470")
     def test_custom_tensor_metadata(self):
         def f(x):
             x_elem = x.elem
@@ -871,7 +867,6 @@ metadata incorrectly.
             isinstance(custom_aa_compile.grad.elem, ConstantExtraMetadataTensor)
         )
-    @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127470")
     def test_nested_subclasses_complicated_inps(self):
         def f(x, y, z):
             temp = x + y
@@ -923,7 +918,6 @@ metadata incorrectly.
         self.assertTrue(torch.allclose(y_nested_compile.grad.a.b, y_nested.grad.a.b))
     @unittest.skipIf(IS_WINDOWS, "Windows isn't supported for this case")
-    @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127470")
     def test_nested_subclasses_complicated_inps_mixed(self):
         def f(x, y):
             y_elem = y.elem
@@ -960,7 +954,6 @@ metadata incorrectly.
         self.assertTrue(torch.allclose(x_nested_compile.grad, x_nested.grad))
         self.assertTrue(torch.allclose(custom_aa_compile.grad, custom_aa.grad))
-    @skipIfTorchDynamo("This test suite already uses dynamo")
     def test_composite_impl_compile(self):
         class Foo(torch.nn.Module):
             def __init__(self) -> None:
@@ -2466,7 +2459,6 @@ def forward(self, primals_1, primals_2):
         # Not checking equality of ref and x as Exception is expected
     # Partially addresses https://github.com/pytorch/pytorch/issues/106457
-    @skipIfTorchDynamo()
     def test_input_mutation_false_aliasing(self):
         def f(a, b):
             a.mul_(3)
@@ -3898,7 +3890,6 @@ def forward(self, tangents_1):
             str(out2.grad_fn.__class__), """<class 'ViewBackward0'>"""
         )
-    @skipIfTorchDynamo()
     @patch("torch._dynamo.config.assume_static_by_default", False)
     def test_dynamic_output_aliases_input_view_meta_replay(self):
         # - torch.compile: using it so we can have a SymInt in the FX graph.
@@ -5409,7 +5400,6 @@ def forward(self, tangents_1, tangents_2):
         self.assertEqual(out_ref.a, out_test.a)
         self.assertEqual(out_ref.b, out_test.b)
-    @skipIfTorchDynamo()
     def test_aot_dispatch_incorrect_backward(self):
         # a is a subclass, b is not
         def f(a, b):
@@ -6197,7 +6187,6 @@ class TestAOTModuleSimplified(AOTTestCase):
         out_buffer = out.values()
         ga, gb, gc = torch.autograd.grad(out_buffer.sum(), (a, b, c))
-    @skipIfTorchDynamo()
     def test_wrong_guess_tangent_type(self):
         def fn(x):
             return x.clone()
@@ -6696,7 +6685,6 @@ instantiate_device_type_tests(TestEagerFusionModuleInfo, globals(), only_for=onl
         "test_subclass_metadata_mutation_req_grad_False",
     ]
 )
-@skipIfTorchDynamo("This test suite already uses dynamo")
 class TestAOTAutogradWithDynamo(TestAOTAutograd):
     """
     These are the same as TestAOTAutograd tests, but we run dynamo first to get a graph module.


@@ -662,6 +662,9 @@ JIT_EXECUTOR_TESTS = [
 INDUCTOR_TESTS = [test for test in TESTS if test.startswith(INDUCTOR_TEST_PREFIX)]
 DISTRIBUTED_TESTS = [test for test in TESTS if test.startswith(DISTRIBUTED_TEST_PREFIX)]
 TORCH_EXPORT_TESTS = [test for test in TESTS if test.startswith("export")]
+AOT_DISPATCH_TESTS = [
+    test for test in TESTS if test.startswith("functorch/test_aotdispatch")
+]
 FUNCTORCH_TESTS = [test for test in TESTS if test.startswith("functorch")]
 ONNX_TESTS = [test for test in TESTS if test.startswith("onnx")]
 CPP_TESTS = [test for test in TESTS if test.startswith(CPP_TEST_PREFIX)]
@@ -1632,6 +1635,11 @@ def parse_args():
         action="store_true",
         help="exclude torch export tests",
     )
+    parser.add_argument(
+        "--exclude-aot-dispatch-tests",
+        action="store_true",
+        help="exclude aot dispatch tests",
+    )
     parser.add_argument(
         "--exclude-distributed-tests",
         action="store_true",
@@ -1798,6 +1806,9 @@ def get_selected_tests(options) -> List[str]:
     if options.exclude_torch_export_tests:
         options.exclude.extend(TORCH_EXPORT_TESTS)
+    if options.exclude_aot_dispatch_tests:
+        options.exclude.extend(AOT_DISPATCH_TESTS)
     # these tests failing in CUDA 11.6 temporary disabling. issue https://github.com/pytorch/pytorch/issues/75375
     if torch.version.cuda is not None:
         options.exclude.extend(["distributions/test_constraints"])
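
Putting the three run_test.py hunks together, the flag flows from argparse into the test-selection filter. The following is a minimal, self-contained sketch under simplified assumptions (a toy TESTS list and stripped-down parse_args/get_selected_tests; the real logic lives in test/run_test.py and handles many more options):

```python
# Sketch of the exclusion plumbing added by this PR, simplified for illustration.
import argparse

# Toy stand-in for the real TESTS list in test/run_test.py.
TESTS = [
    "functorch/test_aotdispatch",
    "functorch/test_vmap",
    "export/test_export",
    "test_torch",
]

# Mirrors the new prefix filter from the first run_test.py hunk.
AOT_DISPATCH_TESTS = [
    test for test in TESTS if test.startswith("functorch/test_aotdispatch")
]


def parse_args(argv=None):
    parser = argparse.ArgumentParser()
    # Mirrors the new argparse flag from the second hunk.
    parser.add_argument(
        "--exclude-aot-dispatch-tests",
        action="store_true",
        help="exclude aot dispatch tests",
    )
    return parser.parse_args(argv)


def get_selected_tests(options):
    # Mirrors the new branch from the third hunk: extend the exclusion list
    # when the flag is set, then filter the selected tests.
    exclude = []
    if options.exclude_aot_dispatch_tests:
        exclude.extend(AOT_DISPATCH_TESTS)
    return [test for test in TESTS if test not in exclude]


if __name__ == "__main__":
    opts = parse_args(["--exclude-aot-dispatch-tests"])
    print(get_selected_tests(opts))
    # -> ['functorch/test_vmap', 'export/test_export', 'test_torch']
```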