From 59bbaea3a7d3d362cc0308a414912dd581dc2e33 Mon Sep 17 00:00:00 2001 From: Xu Han Date: Wed, 7 Aug 2024 19:38:01 +0000 Subject: [PATCH] [inductor] disable capture_pre_autograd_graph related UTs on Windows (#132848) Continued from https://github.com/pytorch/pytorch/pull/132841 We disable `capture_pre_autograd_graph` related UTs on Windows. Disable `test_lstm_packed_change_input_sizes` and `test_multihead_attention` UTs on Windows. **TODO:** Turn them on after fixing the `capture_pre_autograd_graph` issue on Windows. ## Local Test: Linux is not skipped: image And we can skip them on Windows: image Co-authored-by: Jiong Gong Pull Request resolved: https://github.com/pytorch/pytorch/pull/132848 Approved by: https://github.com/jgong5, https://github.com/desertfire --- test/inductor/test_cpu_cpp_wrapper.py | 38 ++++++++++++++++----------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/test/inductor/test_cpu_cpp_wrapper.py b/test/inductor/test_cpu_cpp_wrapper.py index 6283a2aba2001a..e191e48d75dc66 100644 --- a/test/inductor/test_cpu_cpp_wrapper.py +++ b/test/inductor/test_cpu_cpp_wrapper.py @@ -9,7 +9,12 @@ from torch.testing._internal.common_device_type import ( get_desired_device_type_test_bases, ) -from torch.testing._internal.common_utils import IS_MACOS, slowTest, TEST_WITH_ROCM +from torch.testing._internal.common_utils import ( + IS_MACOS, + IS_WINDOWS, + slowTest, + TEST_WITH_ROCM, +) from torch.testing._internal.inductor_utils import HAS_CPU @@ -266,13 +271,14 @@ class BaseTest(NamedTuple): "test_lstm_packed_change_input_sizes", "cpu", test_cpu_repro.CPUReproTests(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest("test_max_pool2d6"), BaseTest("test_mm_views"), BaseTest("test_multihead_attention", "cpu", test_cpu_repro.CPUReproTests()), BaseTest( "test_multi_threading", + condition=not IS_WINDOWS, # Two threads compile, so we expect the output code to be printed twice. 
code_string_count={"py::gil_scoped_release release;": 2}, ), @@ -281,37 +287,37 @@ class BaseTest(NamedTuple): "test_qconv2d", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_qconv2d_relu", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_qconv2d_add", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_qconv2d_add_relu", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_qconv2d_dequant_promotion", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_qconv2d_maxpool2d_linear_dynamic", "cpu", test_mkldnn_pattern_matcher.TestDynamicPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, func_inputs=[ [ "op_qconv2d_pointwise.call", @@ -324,49 +330,49 @@ class BaseTest(NamedTuple): "test_qlinear", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_qlinear_relu", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_qlinear_gelu", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - 
condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_qlinear_add", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_qlinear_add_relu", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_qlinear_dequant_promotion", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_dynamic_qlinear", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest( "test_dynamic_qlinear_qat", "cpu", test_mkldnn_pattern_matcher.TestPatternMatcher(), - condition=torch.backends.mkldnn.is_available(), + condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS, ), BaseTest("test_randint"), BaseTest("test_randn_with_dtype_and_device"),