# [inductor] disable capture_pre_autograd_graph related UTs on Windows (#132848)
Continuing https://github.com/pytorch/pytorch/pull/132841, we disable the `capture_pre_autograd_graph`-related UTs on Windows: `test_lstm_packed_change_input_sizes` and `test_multihead_attention`.

**TODO:** Turn them back on once the `capture_pre_autograd_graph` issue on Windows is fixed.

## Local Test

On Linux the tests are not skipped:

![image](https://github.com/user-attachments/assets/28dfbb4b-d9c0-4d5b-be84-d7b3697bcd3f)

And on Windows they are skipped:

![image](https://github.com/user-attachments/assets/e96ebcf8-9bf3-43aa-93fd-fb33d3743573)

Co-authored-by: Jiong Gong <jiong.gong@intel.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/132848
Approved by: https://github.com/jgong5, https://github.com/desertfire
This commit is contained in:
parent `7ea8374c0e` · commit `59bbaea3a7`
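The change itself is mechanical: each affected `BaseTest(...)` entry gets `and not IS_WINDOWS` folded into its `condition` argument (or gains a bare `condition=not IS_WINDOWS`), so a test only runs when its backend is available *and* the platform is not Windows. As a rough sketch of how such a condition-gated test table can behave (this `BaseTest` is a hypothetical stand-in, not the real helper in the test file):

```python
import sys
import unittest
from dataclasses import dataclass

IS_WINDOWS = sys.platform == "win32"  # mirrors the flag imported in the diff


@dataclass
class BaseTest:
    """Hypothetical stand-in for the test-table entries seen in the diff."""

    name: str
    device: str = "cpu"
    condition: bool = True  # evaluated once, when the table is built


def make_suite(table):
    """Build a unittest suite from the table, skipping gated entries."""

    class Generated(unittest.TestCase):
        pass

    for entry in table:
        def case(self, entry=entry):
            pass  # the real harness would run the named Inductor test here

        case = unittest.skipUnless(entry.condition, f"{entry.name} gated off")(case)
        setattr(Generated, f"{entry.name}_{entry.device}", case)

    return unittest.defaultTestLoader.loadTestsFromTestCase(Generated)


# On Windows the entry below is collected as "skipped"; elsewhere it runs.
suite = make_suite([BaseTest("test_qlinear", "cpu", condition=not IS_WINDOWS)])
unittest.TextTestRunner(verbosity=2).run(suite)
```

Because the condition is evaluated when the table is built, the skip shows up in the test report rather than silently dropping the test, which is what the Local Test screenshots demonstrate.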
```diff
@@ -9,7 +9,12 @@ from torch._inductor.test_case import TestCase as InductorTestCase
 from torch.testing._internal.common_device_type import (
     get_desired_device_type_test_bases,
 )
-from torch.testing._internal.common_utils import IS_MACOS, slowTest, TEST_WITH_ROCM
+from torch.testing._internal.common_utils import (
+    IS_MACOS,
+    IS_WINDOWS,
+    slowTest,
+    TEST_WITH_ROCM,
+)
 from torch.testing._internal.inductor_utils import HAS_CPU
 
 
```
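For reference, the `IS_WINDOWS` flag added to this import block is, as far as I can tell, a plain platform constant in `torch.testing._internal.common_utils`, along these lines (paraphrased, not copied from the PyTorch source):

```python
import sys

# Plausible definitions of the platform flags used above (paraphrased):
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
```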
```diff
@@ -266,13 +271,14 @@ if RUN_CPU:
             "test_lstm_packed_change_input_sizes",
             "cpu",
             test_cpu_repro.CPUReproTests(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest("test_max_pool2d6"),
         BaseTest("test_mm_views"),
         BaseTest("test_multihead_attention", "cpu", test_cpu_repro.CPUReproTests()),
         BaseTest(
             "test_multi_threading",
+            condition=not IS_WINDOWS,
             # Two threads compile, so we expect the output code to be printed twice.
             code_string_count={"py::gil_scoped_release release;": 2},
         ),
```
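One detail in this hunk: `test_multi_threading` is gated with a bare `condition=not IS_WINDOWS` (no mkldnn check needed), and its `code_string_count` argument asserts on the generated C++ wrapper code. A hypothetical sketch of what a `code_string_count`-style check reduces to (the `generated_code` string is faked here; the real harness captures Inductor's output):

```python
# Hypothetical illustration of a code_string_count-style assertion:
# count substring occurrences in captured generated code. The real
# harness captures Inductor's C++ wrapper output; here it is faked.
generated_code = """
py::gil_scoped_release release;
// ... compiled kernel for thread one ...
py::gil_scoped_release release;
// ... compiled kernel for thread two ...
"""

code_string_count = {"py::gil_scoped_release release;": 2}
for needle, expected in code_string_count.items():
    actual = generated_code.count(needle)
    assert actual == expected, f"{needle!r}: expected {expected}, got {actual}"
```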
```diff
@@ -281,37 +287,37 @@ if RUN_CPU:
             "test_qconv2d",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qconv2d_relu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qconv2d_add",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qconv2d_add_relu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qconv2d_dequant_promotion",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qconv2d_maxpool2d_linear_dynamic",
             "cpu",
             test_mkldnn_pattern_matcher.TestDynamicPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
             func_inputs=[
                 [
                     "op_qconv2d_pointwise.call",
```
```diff
@@ -324,49 +330,49 @@ if RUN_CPU:
             "test_qlinear",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qlinear_relu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qlinear_gelu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qlinear_add",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qlinear_add_relu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qlinear_dequant_promotion",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_dynamic_qlinear",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_dynamic_qlinear_qat",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest("test_randint"),
         BaseTest("test_randn_with_dtype_and_device"),
```
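To reproduce the kind of result shown in the Local Test screenshots without a full PyTorch checkout, here is a self-contained demo (class and test names invented) of a Windows-gated test reporting as skipped on Windows and running everywhere else:

```python
import sys
import unittest

IS_WINDOWS = sys.platform == "win32"


class GatedDemo(unittest.TestCase):
    @unittest.skipIf(
        IS_WINDOWS, "capture_pre_autograd_graph is not yet supported on Windows"
    )
    def test_windows_gated(self):
        self.assertTrue(True)


if __name__ == "__main__":
    # Linux/macOS: "OK". Windows: "OK (skipped=1)", matching the screenshots.
    unittest.main(verbosity=2)
```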