Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-06 12:20:52 +01:00
[Break XPU] Fix failing test cases introduced by the community for XPU. (#155317)

Fixes #155186, Fixes #154701

Pull Request resolved: https://github.com/pytorch/pytorch/pull/155317
Approved by: https://github.com/jansel
This commit is contained in:
parent 694028f502
commit 29e6033ff3
@@ -6095,7 +6095,6 @@ GPU_TEST_FAILURES = {
     "test_scaled_dot_product_efficient_attention": fail_gpu(("xpu",)),
     # No fft implementation for XPU yet.
     "test_fft_c2c": fail_gpu(("xpu",), is_skip=True),
-    "test_stft": fail_gpu(("xpu",)),
 }
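For context, here is a minimal sketch of how an expected-failure table like GPU_TEST_FAILURES can be applied to a generated test class. The fail_gpu helper and the apply_failures function below are assumptions modeled on the pattern visible in the hunk, not the test file's actual implementation:

    import unittest

    # Assumption: fail_gpu records which GPU backends a test is expected
    # to fail on, and whether it should be skipped outright.
    def fail_gpu(suffixes, is_skip=False):
        return {"suffixes": suffixes, "is_skip": is_skip}

    GPU_TEST_FAILURES = {
        "test_fft_c2c": fail_gpu(("xpu",), is_skip=True),
    }

    def apply_failures(test_cls, failures, device_suffix):
        # Wrap each listed test as a skip or an expected failure
        # for the active backend.
        for name, info in failures.items():
            if device_suffix not in info["suffixes"]:
                continue
            test = getattr(test_cls, name, None)
            if test is None:
                continue
            wrapper = (
                unittest.skip("not supported on this backend")
                if info["is_skip"]
                else unittest.expectedFailure
            )
            setattr(test_cls, name, wrapper(test))

With a table like this, a test that starts passing on XPU is fixed by deleting its entry, which is exactly what the hunk above does.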
@@ -999,8 +999,8 @@ class TestTiling(TestCase):
         self.assertEqual(out, f(*inps))

     def test_penalized_small_dim(self):
-        x = torch.rand([2000, 1], device="cuda")
-        y = torch.rand([4, 1], device="cuda").T
+        x = torch.rand([2000, 1], device=GPU_TYPE)
+        y = torch.rand([4, 1], device=GPU_TYPE).T

         # dont tile when it doesnt affect total coalesced mem accesses much
         def f(x, y):
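This hunk replaces hard-coded "cuda" devices with the GPU_TYPE constant so the test runs on whichever GPU backend is present. A hedged sketch of how such a constant can be derived; the real GPU_TYPE in pytorch's inductor test utilities may be computed differently:

    import torch

    # Assumption: pick the first available GPU backend, mirroring how the
    # inductor tests abstract over CUDA and XPU with a single device string.
    if torch.cuda.is_available():
        GPU_TYPE = "cuda"
    elif hasattr(torch, "xpu") and torch.xpu.is_available():
        GPU_TYPE = "xpu"
    else:
        GPU_TYPE = "cpu"  # fall back so the sketch still runs without a GPU

    x = torch.rand([2000, 1], device=GPU_TYPE)
    y = torch.rand([4, 1], device=GPU_TYPE).T  # broadcastable against x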
@@ -1224,10 +1224,7 @@ class TestMaxAutotune(TestCase):
         cache_key, events = get_cache_key_and_events()

-        if not TEST_WITH_ROCM:
-            self.assertExpectedInline(
-                remove_white_space(cache_key),
-                remove_white_space(
-                    """{
+        expected = """{
             'input_nodes':[
                 "[[10,22],[22,1],torch.float32,device(type='cuda',index=0),0]",
                 "[[22,30],[30,1],torch.float32,device(type='cuda',index=0),0]"],
@@ -1236,7 +1233,11 @@ class TestMaxAutotune(TestCase):
             'num_consumer_groups':0,'num_buffers_warp_spec':0,'epilogue_fn_hash':'identity',
             'kwargs':{'EVEN_K':False,'ALLOW_TF32':True,'USE_FAST_ACCUM':False,'ACC_TYPE':'tl.float32',
             'BLOCK_M':16,'BLOCK_N':32,'BLOCK_K':16,'GROUP_M':8}}"""
-                ),
-            )
+        expected = expected.replace("cuda", GPU_TYPE)
+        self.assertExpectedInline(
+            remove_white_space(cache_key),
+            remove_white_space(expected),
+        )

         self.assertEqual(
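The rewritten assertion builds the expected cache key once with 'cuda' device strings, substitutes the active backend via expected.replace("cuda", GPU_TYPE), and compares whitespace-insensitively. A minimal sketch of that normalize-then-compare step; this remove_white_space is an assumption for illustration, not necessarily the helper defined in the real test file:

    # Assumption: whitespace-insensitive comparison of a serialized cache key.
    def remove_white_space(s: str) -> str:
        # Drop all whitespace so indentation and line breaks in the inline
        # expected string do not affect the comparison.
        return "".join(s.split())

    GPU_TYPE = "xpu"  # hypothetical backend for this example

    expected = """{
        'input_nodes':["[[10,22],[22,1],torch.float32,device(type='cuda',index=0),0]"]}"""
    expected = expected.replace("cuda", GPU_TYPE)

    cache_key = (
        "{'input_nodes':"
        "[\"[[10,22],[22,1],torch.float32,device(type='xpu',index=0),0]\"]}"
    )
    assert remove_white_space(cache_key) == remove_white_space(expected)

Rewriting the expected string this way also lets the assertion run unconditionally, instead of being gated behind the old if not TEST_WITH_ROCM: branch.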
@@ -1263,10 +1264,7 @@ class TestMaxAutotune(TestCase):
         cache_key, events = get_cache_key_and_events()

-        if not TEST_WITH_ROCM:
-            self.assertExpectedInline(
-                remove_white_space(cache_key),
-                remove_white_space(
-                    """{
+        expected = """{
             'input_nodes':[
                 "[[s77,s17],[s17,1],torch.float32,device(type='cuda',index=0),0]",
                 "[[s17,s94],[s94,1],torch.float32,device(type='cuda',index=0),0]"],
@@ -1274,7 +1272,10 @@ class TestMaxAutotune(TestCase):
             'layout':"[[s77,s94],[s94,1],torch.float32,device(type='cuda',index=0),0]",'num_consumer_groups':0,
             'num_buffers_warp_spec':0,'epilogue_fn_hash':'identity','kwargs':{'EVEN_K':False,'ALLOW_TF32':True,
             'USE_FAST_ACCUM':False,'ACC_TYPE':'tl.float32','BLOCK_M':16,'BLOCK_N':32,'BLOCK_K':16,'GROUP_M':8}}"""
-                ),
-            )
+        expected = expected.replace("cuda", GPU_TYPE)
+        self.assertExpectedInline(
+            remove_white_space(cache_key),
+            remove_white_space(expected),
+        )

         self.assertExpectedInline(
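This second occurrence applies the same pattern to the dynamic-shapes cache key, where sizes appear as symbolic names (s77, s17, s94) rather than concrete integers. assertExpectedInline is pytorch's inline expect-test assertion; below is a rough sketch of its contract, under the assumption that it behaves like the expecttest package, whose real implementation can also rewrite the inline literal when tests run in accept mode:

    # Sketch only: compare a computed string against the literal embedded in
    # the test source. The real assertExpectedInline (torch.testing._internal
    # TestCase / expecttest) can update that literal in place when run with
    # EXPECTTEST_ACCEPT=1.
    def assert_expected_inline(actual: str, expect: str) -> None:
        if actual != expect:
            raise AssertionError(f"actual != expected:\n{actual}\n---\n{expect}")

    assert_expected_inline("device(type='xpu')", "device(type='xpu')")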