#0: Skip weights bfloat8 on grayskull
sankarmanoj-tt committed Feb 13, 2025
1 parent 78bf6ea · commit 68e85df
Showing 1 changed file with 8 additions and 4 deletions.

tests/ttnn/unit_tests/operations/test_new_conv2d.py
@@ -789,7 +789,7 @@ def test_conv_for_segformer_512x512(
 )
 @pytest.mark.parametrize(
     "weights_dtype",
-    [ttnn.bfloat16, ttnn.bfloat8_b],
+    [ttnn.bfloat16],
 )
 @pytest.mark.parametrize(
     "activations_dtype",
@@ -1220,7 +1220,7 @@ def test_resnet50_conv_wh_fp32(
 )
 @pytest.mark.parametrize(
     "weights_dtype",
-    [ttnn.bfloat8_b],
+    [ttnn.bfloat16],
 )
 @pytest.mark.parametrize(
     "activations_dtype",
@@ -1503,7 +1503,7 @@ def test_sd_conv_wh(
 )
 @pytest.mark.parametrize(
     "weights_dtype",
-    [ttnn.bfloat8_b],
+    [ttnn.bfloat16],
 )
 @pytest.mark.parametrize(
     "activations_dtype",
@@ -1958,6 +1958,7 @@ def test_unet_conv_groups_8_wh(
 )


+@skip_for_grayskull()
 @pytest.mark.parametrize("device_params", [{"l1_small_size": 16384}], indirect=True)
 @pytest.mark.parametrize(
     "batch_size, output_channels, input_channels, input_height, input_width, filter_height, filter_width, stride_h, stride_w, pad_h, pad_w, config_override",
@@ -2019,6 +2020,7 @@ def test_halo_reshard_conv(
 )


+@skip_for_grayskull()
 @pytest.mark.skip("New API needs to be tested")
 @pytest.mark.parametrize("device_params", [{"l1_small_size": 16384}], indirect=True)
 @pytest.mark.parametrize(
@@ -2380,6 +2382,7 @@ def test_yolov4_conv_groups_larger_than_one(
 )


+@skip_for_grayskull()
 @pytest.mark.parametrize("device_params", [{"l1_small_size": 16384}], indirect=True)
 @pytest.mark.parametrize(
     " output_channels, input_channels, input_height, input_width, filter_height, filter_width, stride_h, stride_w, pad_h, pad_w, shard_layout, config_override, use_shallow_conv_variant, groups",
@@ -2630,7 +2633,7 @@ def test_conv_for_vanilla_unet(
 )
 @pytest.mark.parametrize(
     "weights_dtype",
-    [ttnn.bfloat8_b, ttnn.bfloat16],
+    [ttnn.bfloat16],
 )
 @pytest.mark.parametrize(
     "activations_dtype",
@@ -2870,6 +2873,7 @@ def test_shallow_conv_with_tiled_input(device):

 # Tests running conv2d which maps to matmul w/o sharding the input tensor.
 # Output tensor is in DRAM.
+@skip_for_grayskull()
 @pytest.mark.parametrize("device_params", [{"l1_small_size": 16384}], indirect=True)
 @pytest.mark.parametrize("tiled_input", [True, False])
 @pytest.mark.parametrize("input_on_device", [True, False])
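The new `@skip_for_grayskull()` markers rely on a helper this test file already imports (in tt-metal it comes from `models.utility_functions`). A minimal sketch of such a decorator, assuming the target architecture is published through an `ARCH_NAME` environment variable; the repository's real helper may detect the device differently:

```python
# Sketch of a skip_for_grayskull-style decorator built on pytest.mark.skipif.
# Assumption: the architecture is exposed via the ARCH_NAME environment
# variable; tt-metal's actual helper (models/utility_functions.py) may differ.
import os

import pytest


def is_grayskull():
    # Treat an ARCH_NAME of "grayskull" as a Grayskull target.
    return os.environ.get("ARCH_NAME", "").startswith("grayskull")


def skip_for_grayskull(reason_str="not working for Grayskull"):
    # Return a regular pytest mark so it stacks with parametrize decorators.
    return pytest.mark.skipif(is_grayskull(), reason=reason_str)
```

Because the helper returns an ordinary pytest mark, it can also be attached to a single parameter rather than a whole test, which the next sketch uses.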

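Note that the `weights_dtype` hunks remove `ttnn.bfloat8_b` from the parametrize lists outright, so that case disappears on every architecture, not only Grayskull. A hypothetical alternative that would keep bfloat8_b coverage elsewhere is to mark only that parameter with the skip; `test_weights_dtype_matrix` below is an illustrative name, not a test from this file:

```python
# Hypothetical alternative: keep bfloat8_b in the test matrix but skip it only
# on Grayskull via pytest.param marks, instead of deleting it everywhere.
import pytest
import ttnn
# Assumes the helper shown in the sketch above, or tt-metal's own
# models.utility_functions.skip_for_grayskull.
from models.utility_functions import skip_for_grayskull


@pytest.mark.parametrize(
    "weights_dtype",
    [
        ttnn.bfloat16,
        pytest.param(ttnn.bfloat8_b, marks=skip_for_grayskull()),
    ],
)
def test_weights_dtype_matrix(weights_dtype):
    # Placeholder body; the real tests build conv2d configs from these dtypes.
    assert weights_dtype in (ttnn.bfloat16, ttnn.bfloat8_b)
```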