[inductor] disable capture_pre_autograd_graph related UTs on Windows (pytorch#132848)

Continuation of pytorch#132841.

We disable the `capture_pre_autograd_graph`-related UTs on Windows, specifically the `test_lstm_packed_change_input_sizes` and `test_multihead_attention` UTs.

**TODO:**
Turn them back on after the `capture_pre_autograd_graph` issue on Windows is fixed.
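
For context on the mechanics: the gating in the diff relies on the `condition` field of `BaseTest`, which the harness turns into a skip. Below is a minimal sketch of that pattern; `make_test_case` here is an illustrative stand-in for the real helper in `test_cpu_cpp_wrapper.py`, not its actual signature:

```python
# Minimal sketch of condition-gated test registration. `make_test_case` is an
# illustrative stand-in, not the actual helper in test_cpu_cpp_wrapper.py.
import sys
import unittest

# torch.testing._internal.common_utils defines IS_WINDOWS essentially like this:
IS_WINDOWS = sys.platform == "win32"


def make_test_case(cls, name, condition=True):
    """Attach a placeholder test method to cls, skipped when condition is False."""

    @unittest.skipUnless(condition, f"{name} is disabled in this environment")
    def test(self):
        pass  # the real harness runs the corresponding inductor test body here

    setattr(cls, name, test)


class SketchTests(unittest.TestCase):
    pass


# Same shape of gate as in this PR: require mkldnn AND a non-Windows platform.
mkldnn_available = True  # stand-in for torch.backends.mkldnn.is_available()
make_test_case(SketchTests, "test_qconv2d", condition=mkldnn_available and not IS_WINDOWS)

if __name__ == "__main__":
    unittest.main()
```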

## Local Test:
On Linux the tests are not skipped:
<img width="1387" alt="image" src="https://github.com/user-attachments/assets/28dfbb4b-d9c0-4d5b-be84-d7b3697bcd3f">

And they are skipped on Windows:
<img width="853" alt="image" src="https://github.com/user-attachments/assets/e96ebcf8-9bf3-43aa-93fd-fb33d3743573">

Co-authored-by: Jiong Gong <[email protected]>
Pull Request resolved: pytorch#132848
Approved by: https://github.com/jgong5, https://github.com/desertfire
xuhancn authored and pytorchmergebot committed Aug 7, 2024
1 parent 7ea8374 commit 59bbaea
Showing 1 changed file with 22 additions and 16 deletions: test/inductor/test_cpu_cpp_wrapper.py

@@ -9,7 +9,12 @@
 from torch.testing._internal.common_device_type import (
     get_desired_device_type_test_bases,
 )
-from torch.testing._internal.common_utils import IS_MACOS, slowTest, TEST_WITH_ROCM
+from torch.testing._internal.common_utils import (
+    IS_MACOS,
+    IS_WINDOWS,
+    slowTest,
+    TEST_WITH_ROCM,
+)
 from torch.testing._internal.inductor_utils import HAS_CPU
 
 
@@ -266,13 +271,14 @@ class BaseTest(NamedTuple):
"test_lstm_packed_change_input_sizes",
"cpu",
test_cpu_repro.CPUReproTests(),
condition=torch.backends.mkldnn.is_available(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
),
BaseTest("test_max_pool2d6"),
BaseTest("test_mm_views"),
BaseTest("test_multihead_attention", "cpu", test_cpu_repro.CPUReproTests()),
BaseTest(
"test_multi_threading",
condition=not IS_WINDOWS,
# Two threads compile, so we expect the output code to be printed twice.
code_string_count={"py::gil_scoped_release release;": 2},
),
@@ -281,37 +287,37 @@ class BaseTest(NamedTuple):
             "test_qconv2d",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qconv2d_relu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qconv2d_add",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qconv2d_add_relu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qconv2d_dequant_promotion",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qconv2d_maxpool2d_linear_dynamic",
             "cpu",
             test_mkldnn_pattern_matcher.TestDynamicPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
             func_inputs=[
                 [
                     "op_qconv2d_pointwise.call",
@@ -324,49 +330,49 @@ class BaseTest(NamedTuple):
             "test_qlinear",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qlinear_relu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qlinear_gelu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qlinear_add",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qlinear_add_relu",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_qlinear_dequant_promotion",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_dynamic_qlinear",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest(
             "test_dynamic_qlinear_qat",
             "cpu",
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
-            condition=torch.backends.mkldnn.is_available(),
+            condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
         ),
         BaseTest("test_randint"),
         BaseTest("test_randn_with_dtype_and_device"),
