Skip on SM100 or later since tests are non-deterministic (#163552)
This is tracked in https://github.com/pytorch/pytorch/issues/163462. Skipping since we are seeing sporadic errors locally and on CI.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/163552
Approved by: https://github.com/eqy, https://github.com/Skylion007
ghstack dependencies: #163460, #163537
This commit is contained in:
parent 0f674077f4
commit b3cf5c79dd
@@ -515,13 +515,12 @@ class TestMatmulCuda(InductorTestCase):
     @unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support CUTLASS")
     # TODO(future PR): enable compile for torch._grouped_mm fallback path
     @unittest.skipIf(not SM90OrLater, "Grouped gemm with compile supported on SM90")
+    @unittest.skipIf(SM100OrLater, "Grouped gemm is inconsistently raising numeric issues see: #163462 ")
     @parametrize("op", ["2d/2d", "2d/3d", "3d/2d", "3d/3d"])
     @parametrize("a_row_major", [False, True])
     @parametrize("b_row_major", [False, True])
     @parametrize("max_autotune", [False, True])
     def test_grouped_gemm_compiled(self, op, a_row_major, b_row_major, max_autotune):
-        if max_autotune and SM100OrLater:
-            self.skipTest("Triton templates not supported on SM100+ for grouped_mm")
         device = "cuda"
         dtype_AB = torch.bfloat16
         dtype_offset = torch.int32
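For context, the decorator-level skip in this diff relies on compute-capability flags such as SM90OrLater and SM100OrLater from torch.testing._internal.common_cuda. Below is a minimal, hedged sketch of how an equivalent SM100+ guard can be expressed with the public torch.cuda API; the names IS_SM100_OR_LATER, _is_sm100_or_later, and _DemoSkipOnSM100 are illustrative inventions for this sketch, not helpers from test_matmul_cuda.py.

# Minimal sketch (assumption-based), not the real test file's helpers.
import unittest

import torch


def _is_sm100_or_later() -> bool:
    # get_device_capability() returns (major, minor); SM100-class
    # (Blackwell) GPUs report major compute capability 10.
    if not torch.cuda.is_available():
        return False
    major, _minor = torch.cuda.get_device_capability()
    return major >= 10


IS_SM100_OR_LATER = _is_sm100_or_later()


class _DemoSkipOnSM100(unittest.TestCase):
    @unittest.skipIf(
        IS_SM100_OR_LATER,
        "Grouped gemm is inconsistently raising numeric issues, see: #163462",
    )
    def test_grouped_gemm_like(self):
        # Placeholder body standing in for the real grouped-gemm check.
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()

Skipping via the decorator, as the commit does, marks the whole test as skipped in the report up front on SM100+, rather than entering the test body and bailing out only for the max_autotune variants as the removed in-body check did.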