mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-06 12:20:52 +01:00
[AOTI] Skip test_simple_multi_arch_embed_kernel_binary_True_cuda (#157301)
Summary: For https://github.com/pytorch/pytorch/issues/156930, there is still no clue as to what went wrong, as it is not reproducible locally, but somehow the problem seems to exist only when embed_kernel_binary is True. Let's skip it for now. Pull Request resolved: https://github.com/pytorch/pytorch/pull/157301 Approved by: https://github.com/yushangdi
This commit is contained in:
parent
75f489d37f
commit
e5edd013ab
|
|
@ -181,7 +181,9 @@ class AOTInductorTestsTemplate:
|
|||
"toolchain doesn't support ptx to fatbin",
|
||||
)
|
||||
@skipIfRocm
|
||||
@common_utils.parametrize("embed_kernel_binary", [True, False])
|
||||
# Skip embed_kernel_binary == True for now as it shows random
|
||||
# failure on CI
|
||||
@common_utils.parametrize("embed_kernel_binary", [False])
|
||||
def test_simple_multi_arch(self, embed_kernel_binary):
|
||||
if self.device != GPU_TYPE:
|
||||
raise unittest.SkipTest("requires GPU_TYPE")
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user