[CI] Ensure inductor/test_cpu_cpp_wrapper is actually run in inductor_cpp_wrapper_abi_compatible (#126717)

`inductor/test_cpu_cpp_wrapper` is not actually being run in the `inductor_cpp_wrapper_abi_compatible` test config.

The cpu device type gets removed by the device-type filtering in d28868c7e8/torch/testing/_internal/common_device_type.py (L733), so the `RUN_CPU` check in d28868c7e8/test/inductor/test_cpu_cpp_wrapper.py (L396) returns `False` and none of the cpu tests get generated.
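For context, `PYTORCH_TESTING_DEVICE_ONLY_FOR` restricts which device types the common test framework instantiates. A minimal sketch of that filtering (not the actual PyTorch source; everything except the environment variable name is illustrative):

```python
import os

# Sketch: when PYTORCH_TESTING_DEVICE_ONLY_FOR is set (e.g. to "cuda" in
# this CI job), any device type not listed -- including "cpu" -- is dropped
# before the device-specific test classes are generated.
def filter_device_types(device_types):
    only_for = os.environ.get("PYTORCH_TESTING_DEVICE_ONLY_FOR", "")
    if only_for:
        allowed = set(only_for.split(","))
        device_types = [d for d in device_types if d in allowed]
    return device_types
```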

Feel free to make a PR with a different way to do this (a better `RUN_CPU` check?).
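One hypothetical shape for such a check (illustrative only, not part of this PR): consult the environment variable directly instead of the already-filtered device-type list, so the cpu tests stay enabled whenever cpu is not explicitly excluded:

```python
import os

# Hypothetical RUN_CPU variant: an unset or empty filter enables cpu tests,
# and an explicit filter enables them only if it includes "cpu".
_only_for = os.environ.get("PYTORCH_TESTING_DEVICE_ONLY_FOR", "")
RUN_CPU = not _only_for or "cpu" in _only_for.split(",")
```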

Add a `skip` option to `make_test_case` and use it to skip the failing tests; I am not equipped to fix them.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/126717
Approved by: https://github.com/ZainRizvi
Catherine Lee, 2024-06-06 18:23:50 +00:00, committed by PyTorch MergeBot
parent 936225d7b2
commit fba21edf5b
2 changed files with 12 additions and 2 deletions

.ci/pytorch/test.sh

@@ -368,7 +368,7 @@ test_inductor_cpp_wrapper_abi_compatible() {
   echo "Testing Inductor cpp wrapper mode with TORCHINDUCTOR_ABI_COMPATIBLE=1"
   # cpu stack allocation causes segfault and needs more investigation
-  python test/run_test.py --include inductor/test_cpu_cpp_wrapper
+  PYTORCH_TESTING_DEVICE_ONLY_FOR="" python test/run_test.py --include inductor/test_cpu_cpp_wrapper
   python test/run_test.py --include inductor/test_cuda_cpp_wrapper
   TORCHINDUCTOR_CPP_WRAPPER=1 python benchmarks/dynamo/timm_models.py --device cuda --accuracy --amp \
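The added `PYTORCH_TESTING_DEVICE_ONLY_FOR=""` prefix clears the device filter for this one invocation only, without touching the rest of the CI job. The same override expressed in Python, as a sketch:

```python
import os
import subprocess

# Run the cpu cpp-wrapper tests with the device filter cleared for just
# this subprocess; the surrounding environment is left unchanged.
env = dict(os.environ, PYTORCH_TESTING_DEVICE_ONLY_FOR="")
subprocess.run(
    ["python", "test/run_test.py", "--include", "inductor/test_cpu_cpp_wrapper"],
    env=env,
    check=True,
)
```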

test/inductor/test_cpu_cpp_wrapper.py

@@ -115,6 +115,7 @@ def make_test_case(
     slow=False,
     func_inputs=None,
     code_string_count=None,
+    skip=None,
 ):
     test_name = f"{name}_{device}" if device else name
     if code_string_count is None:
@@ -123,6 +124,8 @@
     func = getattr(tests, test_name)
     assert callable(func), "not a callable"
     func = slowTest(func) if slow else func
+    if skip:
+        func = unittest.skip(skip)(func)
 
     @config.patch(cpp_wrapper=True, search_autotune_cache=False)
     def fn(self):
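The wrapping above relies on `unittest.skip(reason)` returning a decorator that can be applied to an existing function. A self-contained demo of that pattern, independent of the PyTorch test harness:

```python
import unittest

def test_something(self):
    self.fail("should never run")

# unittest.skip(reason) returns a decorator; applying it marks the
# function so the runner reports it as skipped instead of executing it.
test_something = unittest.skip("Failing")(test_something)

class Demo(unittest.TestCase):
    test_demo = test_something

if __name__ == "__main__":
    unittest.main()  # reports the test as skipped, not failed
```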
@@ -170,6 +173,7 @@ if RUN_CPU:
         slow: bool = False
         func_inputs: list = None
         code_string_count: dict = {}
+        skip: str = None
 
     for item in [
         BaseTest("test_add_complex"),
@@ -228,7 +232,9 @@
             torch.backends.mkldnn.is_available()
             and torch.ops.mkldnn._is_mkldnn_bf16_supported(),
         ),
-        BaseTest("test_linear_packed", "", test_cpu_repro.CPUReproTests()),
+        BaseTest(
+            "test_linear_packed", "", test_cpu_repro.CPUReproTests(), skip="Failing"
+        ),
         BaseTest(
             "test_lstm_packed_change_input_sizes",
             "cpu",
@@ -302,18 +308,21 @@ if RUN_CPU:
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
             condition=torch.backends.mkldnn.is_available(),
+            skip="Failing",
         ),
         BaseTest(
             "test_qlinear_add",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
             condition=torch.backends.mkldnn.is_available(),
+            skip="Failing",
         ),
         BaseTest(
             "test_qlinear_add_relu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
             condition=torch.backends.mkldnn.is_available(),
+            skip="Failing",
         ),
         BaseTest(
             "test_qlinear_dequant_promotion",
@@ -369,6 +378,7 @@ if RUN_CPU:
             item.slow,
             item.func_inputs,
             item.code_string_count,
+            skip=item.skip,
         )
 
     test_torchinductor.copy_tests(