Add test to xfail_list only for abi_compatible (#128506)
https://github.com/pytorch/pytorch/pull/126717 will skip the tests in both ABI-compatible and non-ABI-compatible mode. That is not the intent: these tests run successfully in non-ABI-compatible mode and only have issues in ABI-compatible mode. This PR instead leverages the existing `xfail_list` for tests that fail only in ABI-compatible mode.

- `test_qlinear_add` is already in the `xfail_list`.
- `test_linear_packed` does not fail in my local run (with `TORCHINDUCTOR_ABI_COMPATIBLE=1`) or in the CI of this PR, so it is not added to the `xfail_list`.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/128506
Approved by: https://github.com/jgong5, https://github.com/desertfire
This commit is contained in:
parent 4bc90185fb
commit df85f34a14
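For context, here is a minimal, self-contained sketch of the `xfail_list` pattern this change relies on (class and test bodies are hypothetical; the real list and loop live in the inductor C++-wrapper CPU test file). Tests on the list are wrapped in `unittest.expectedFailure` only when ABI-compatible mode is active, so everywhere else they still run and pass:

import unittest

abi_compatible = True  # stand-in for torch._inductor.config.abi_compatible

xfail_list = [
    "test_qlinear_add_cpu",
    "test_qlinear_add_relu_cpu",  # one of the entries this PR adds
]

class CppWrapperTests(unittest.TestCase):  # hypothetical test class
    def test_qlinear_add_cpu(self):
        self.fail("fails only in ABI-compatible mode")

    def test_qlinear_add_relu_cpu(self):
        self.fail("fails only in ABI-compatible mode")

if abi_compatible:
    for test_name in xfail_list:
        # The test still runs: while broken it is reported as an expected
        # failure, and once fixed it shows up as an unexpected success.
        setattr(
            CppWrapperTests,
            test_name,
            unittest.expectedFailure(getattr(CppWrapperTests, test_name)),
        )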
@@ -95,7 +95,9 @@ if config.abi_compatible:
         "test_qconv2d_relu_cpu",
         "test_qlinear_cpu",
         "test_qlinear_add_cpu",
+        "test_qlinear_add_relu_cpu",
         "test_qlinear_dequant_promotion_cpu",
+        "test_qlinear_gelu_cpu",
         "test_qlinear_relu_cpu",
     ]
     for test_name in xfail_list:
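As a usage note: ABI-compatible mode can be forced locally through the environment variable mentioned in the commit message. Assuming the usual inductor config plumbing, the variable is read when the config module is first imported, so it must be set beforehand:

import os
os.environ["TORCHINDUCTOR_ABI_COMPATIBLE"] = "1"  # set before the import below

import torch._inductor.config as inductor_config

# The xfail_list above is only applied when this flag is on.
print(inductor_config.abi_compatible)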
@@ -125,7 +127,6 @@ def make_test_case(
     slow=False,
     func_inputs=None,
     code_string_count=None,
-    skip=None,
 ):
     test_name = f"{name}_{device}" if device else name
     if code_string_count is None:
@@ -134,8 +135,6 @@ def make_test_case(
     func = getattr(tests, test_name)
     assert callable(func), "not a callable"
     func = slowTest(func) if slow else func
-    if skip:
-        func = unittest.skip(skip)(func)
 
     @config.patch(cpp_wrapper=True, search_autotune_cache=False)
     def fn(self):
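The two hunks above remove the ad-hoc `skip` plumbing from `make_test_case`. The practical difference, shown with a minimal standalone example: a skipped test never executes, so a fix goes unnoticed, while an expected-failure test still executes and a fix is surfaced as an unexpected success:

import unittest

class Demo(unittest.TestCase):
    @unittest.skip("Failing")  # never runs; stays silent even once fixed
    def test_skipped(self):
        self.assertTrue(True)

    @unittest.expectedFailure  # runs; XFAIL while broken, flagged once it passes
    def test_xfailed(self):
        self.assertTrue(False)

if __name__ == "__main__":
    unittest.main()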
@@ -183,7 +182,6 @@ if RUN_CPU:
         slow: bool = False
         func_inputs: list = None
         code_string_count: dict = {}
-        skip: str = None
 
     for item in [
         BaseTest("test_add_complex"),
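`BaseTest` is a per-test configuration record; the hunk above drops its `skip` field. A reduced sketch of its shape, with the field names taken from the diff and the remaining details assumed:

from typing import NamedTuple

class BaseTest(NamedTuple):
    name: str
    device: str = "cpu"
    tests: object = None            # TestCase instance that provides the test body
    condition: bool = True          # e.g. torch.backends.mkldnn.is_available()
    slow: bool = False
    func_inputs: list = None
    code_string_count: dict = {}    # mutable default mirrored from the diff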
@@ -242,9 +240,7 @@ if RUN_CPU:
             torch.backends.mkldnn.is_available()
             and torch.ops.mkldnn._is_mkldnn_bf16_supported(),
         ),
-        BaseTest(
-            "test_linear_packed", "", test_cpu_repro.CPUReproTests(), skip="Failing"
-        ),
+        BaseTest("test_linear_packed", "", test_cpu_repro.CPUReproTests()),
         BaseTest(
             "test_lstm_packed_change_input_sizes",
             "cpu",
@@ -318,21 +314,18 @@ if RUN_CPU:
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
             condition=torch.backends.mkldnn.is_available(),
-            skip="Failing",
         ),
         BaseTest(
             "test_qlinear_add",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
             condition=torch.backends.mkldnn.is_available(),
-            skip="Failing",
         ),
         BaseTest(
             "test_qlinear_add_relu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
             condition=torch.backends.mkldnn.is_available(),
-            skip="Failing",
         ),
         BaseTest(
             "test_qlinear_dequant_promotion",
@@ -388,7 +381,6 @@ if RUN_CPU:
             item.slow,
             item.func_inputs,
             item.code_string_count,
-            skip=item.skip,
         )
 
     test_torchinductor.copy_tests(
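Finally, the hunk above drops `skip=item.skip` from the driver that turns each `BaseTest` record into a generated test. A condensed, hypothetical version of that loop, to show where the argument used to flow:

def build_tests(test_matrix, make_test_case):
    # Each record becomes a generated test; records whose backend condition
    # is false are not materialized at all.
    for item in test_matrix:
        if item.condition:
            make_test_case(
                item.name,
                item.device,
                item.tests,
                item.slow,
                item.func_inputs,
                item.code_string_count,
                # skip=item.skip is gone: ABI-compatible-only failures are
                # now expressed through xfail_list instead
            )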