Merge branch 'main' into kw/layout_device_ops
Showing 7 changed files with 130 additions and 171 deletions.
@@ -0,0 +1,57 @@ (new file)

import torch
import torch_ttnn
import pytest

from tests.utils import assert_with_pcc


class CumsumModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input, dim):
        return torch.ops.aten.cumsum.default(input, dim=dim)


@pytest.mark.parametrize(
    "input_shapes, dim",
    [
        ((1, 32), -1),
        ((1, 45), -1),
        ((1, 59), 1),
        ((1, 5), -1),
        ((1, 60), 1),
        ((1, 10), 1),
        ((4, 32, 32), 0),
        ((1, 4, 32, 32), 1),
        ((4, 4, 32, 32), 0),
        ((1, 23, 40), 1),
        ((4, 32), 0),
        pytest.param(
            (1, 1, 32, 32),
            3,
            marks=pytest.mark.xfail(reason="inner-most 2 dims are not supported (#367)"),
        ),
        pytest.param(
            (1, 23, 40),
            2,
            marks=pytest.mark.xfail(reason="inner-most 2 dims are not supported (#367)"),
        ),
    ],
)
def test_cumsum(device, input_shapes, dim):
    m = CumsumModule()
    inputs = torch.rand(input_shapes, dtype=torch.bfloat16)
    result_before = m.forward(inputs, dim)

    option = torch_ttnn.TorchTtnnOption(device=device, gen_graphviz=False)
    # The compilation is lazy, so we need to run forward once to trigger the compilation
    m = torch.compile(m, backend=torch_ttnn.backend, options=option)

    result_after = m.forward(inputs, dim)
    option._out_fx_graphs[0].print_tabular()

    # Check that the graph has been rewritten and no longer contains the aten op
    nodes = [node.target for node in option._out_fx_graphs[0].nodes]
    assert nodes.count(torch.ops.aten.cumsum.default) == 0
    assert_with_pcc(result_before, result_after, pcc=0.99)
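The test compares the eager and compiled outputs with assert_with_pcc, which is imported from tests.utils and is not part of this diff. Below is a minimal sketch of what such a helper might look like, assuming "pcc" means the Pearson correlation coefficient between the flattened golden and computed tensors; the real tests.utils.assert_with_pcc may differ.

import torch


def assert_with_pcc(expected, actual, pcc=0.99):
    # Flatten both tensors and compute their Pearson correlation coefficient.
    x = expected.flatten().to(torch.float32)
    y = actual.flatten().to(torch.float32)
    # torch.corrcoef on a stacked 2xN tensor returns the 2x2 correlation
    # matrix; the off-diagonal entry is the coefficient between x and y.
    cc = torch.corrcoef(torch.stack([x, y]))[0, 1].item()
    assert cc >= pcc, f"PCC {cc:.5f} is below the required threshold {pcc}"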
@@ -1,178 +1,44 @@ (the four previous expand tests and their helper modules are replaced by a single simplified test)

Before:

import torch
import torch_ttnn
import pytest
import ttnn
from torch_ttnn.utils import (
    TtnnRowMajorLayout,
    TtnnTileLayout,
)

from tests.utils import assert_with_pcc


class ExpandModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x, new_shape):
        return x.expand(new_shape)


class ExpandAfterOpModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x, new_shape):
        a = torch.clone(x)
        return a.expand(new_shape)


class ExpandBetweenOpsModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x, new_shape):
        a = torch.clone(x)
        ex = a.expand(new_shape)
        return torch.add(ex, ex)


@pytest.mark.xfail(reason="lowering issue (#67)")
@pytest.mark.parametrize(
    "input_shape, new_shape",
    [
        ((1, 4), (4, 4)),
    ],
)
def test_expand(device, input_shape, new_shape):
    m = ExpandModule()
    tensor = torch.rand(input_shape, dtype=torch.bfloat16)
    inputs = [tensor, new_shape]
    result_before = m.forward(*inputs)
    option = torch_ttnn.TorchTtnnOption(device=device)
    option.gen_graphviz = True
    # The compilation is lazy, so we need to run forward once to trigger the compilation
    m = torch.compile(m, backend=torch_ttnn.backend, options=option)
    result_after = m.forward(*inputs)
    option._out_fx_graphs[0].print_tabular()

    # Check that the graph has been rewritten and contains ttnn ops
    nodes = list(option._out_fx_graphs[0].nodes)
    target = [node.target for node in nodes]
    assert target.count(ttnn.repeat) == 1
    assert nodes[target.index(ttnn.repeat)].args[1].target == ttnn.Shape
    # Check inference result
    assert torch.allclose(result_before, result_after, rtol=0.2)


@pytest.mark.xfail(reason="lowering issue (#67)")
@pytest.mark.parametrize(
    "input_shape, new_shape",
    [
        ((1, 4), (4, 4)),
    ],
)
def test_expand_after_op(device, input_shape, new_shape):
    m = ExpandAfterOpModule()
    tensor = torch.rand(input_shape, dtype=torch.bfloat16)
    inputs = [tensor, new_shape]
    result_before = m.forward(*inputs)
    option = torch_ttnn.TorchTtnnOption(device=device)
    option.gen_graphviz = True
    # The compilation is lazy, so we need to run forward once to trigger the compilation
    m = torch.compile(m, backend=torch_ttnn.backend, options=option)
    result_after = m.forward(*inputs)
    option._out_fx_graphs[0].print_tabular()

    # Check that the graph has been rewritten and contains ttnn ops
    nodes = list(option._out_fx_graphs[0].nodes)
    target = [node.target for node in nodes]
    assert target.count(ttnn.repeat) == 1
    repeat_node = nodes[target.index(ttnn.repeat)]
    assert repeat_node.args[0].target == ttnn.to_layout
    assert repeat_node.args[0].args[0].target == ttnn.clone
    assert type(repeat_node.args[0].args[1]) is type(TtnnRowMajorLayout())
    assert repeat_node.args[1].target == ttnn.Shape
    # Check inference result
    assert torch.allclose(result_before, result_after, rtol=0.2)


@pytest.mark.xfail(reason="lowering issue (#67)")
@pytest.mark.parametrize(
    "input_shape, new_shape",
    [
        ((1, 4), (4, 4)),
    ],
)
def test_expand_before_op(device, input_shape, new_shape):
    class ExpandBeforeOpModule(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, x, new_shape):
            ex = x.expand(new_shape)
            return torch.add(ex, ex)

    m = ExpandBeforeOpModule()
    tensor = torch.rand(input_shape, dtype=torch.bfloat16)
    inputs = [tensor, new_shape]
    result_before = m.forward(*inputs)
    option = torch_ttnn.TorchTtnnOption(device=device)
    option.gen_graphviz = True
    # The compilation is lazy, so we need to run forward once to trigger the compilation
    m = torch.compile(m, backend=torch_ttnn.backend, options=option)
    result_after = m.forward(*inputs)
    option._out_fx_graphs[0].print_tabular()

    # Check that the graph has been rewritten and contains ttnn ops
    nodes = list(option._out_fx_graphs[0].nodes)
    target = [node.target for node in nodes]
    assert target.count(ttnn.repeat) == 1
    assert nodes[target.index(ttnn.repeat)].args[1].target == ttnn.Shape
    # to_layout that follows ttnn.repeat
    to_layout_idx = target.index(ttnn.to_layout, target.index(ttnn.repeat))
    to_layout_node = nodes[to_layout_idx]
    assert to_layout_node.args[0].target == ttnn.repeat
    assert type(to_layout_node.args[1]) is type(TtnnTileLayout())
    assert target.count(ttnn.add) == 1
    assert to_layout_idx < target.index(ttnn.add)

    # Check inference result
    assert torch.allclose(result_before, result_after, rtol=0.2)


@pytest.mark.xfail(reason="lowering issue (#67)")
@pytest.mark.parametrize(
    "input_shape, new_shape",
    [
        ((1, 4), (4, 4)),
    ],
)
def test_expand_between_ops(device, input_shape, new_shape):
    m = ExpandBetweenOpsModule()
    tensor = torch.rand(input_shape, dtype=torch.bfloat16)
    inputs = [tensor, new_shape]
    result_before = m.forward(*inputs)
    option = torch_ttnn.TorchTtnnOption(device=device)
    option.gen_graphviz = True
    # The compilation is lazy, so we need to run forward once to trigger the compilation
    m = torch.compile(m, backend=torch_ttnn.backend, options=option)
    result_after = m.forward(*inputs)
    option._out_fx_graphs[0].print_tabular()

    # Check that the graph has been rewritten and contains ttnn ops
    nodes = list(option._out_fx_graphs[0].nodes)
    target = [node.target for node in nodes]
    assert target.count(ttnn.repeat) == 1
    repeat_node = nodes[target.index(ttnn.repeat)]
    assert repeat_node.args[0].target == ttnn.to_layout
    assert repeat_node.args[0].args[0].target == ttnn.clone
    assert type(repeat_node.args[0].args[1]) is type(TtnnRowMajorLayout())
    assert repeat_node.args[1].target == ttnn.Shape
    # to_layout that follows ttnn.repeat
    to_layout_idx = target.index(ttnn.to_layout, target.index(ttnn.repeat))
    to_layout_node = nodes[to_layout_idx]
    assert to_layout_node.args[0].target == ttnn.repeat
    assert type(to_layout_node.args[1]) is type(TtnnTileLayout())
    assert target.count(ttnn.add) == 1
    assert to_layout_idx < target.index(ttnn.add)
    # Check inference result
    assert torch.allclose(result_before, result_after, rtol=0.2)

After:

import torch
import torch_ttnn
import pytest
import ttnn

from tests.utils import assert_with_pcc


class ExpandModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input_tensor, shape):
        return torch.ops.aten.expand.default(input_tensor, shape)


@pytest.mark.parametrize(
    "input_shape, output_shape",
    [
        ((1, 2), (32, -1)),
        ((1, 4), (32, -1)),
        ((1, 6), (32, -1)),
        pytest.param((1, 3), (32, -1), marks=pytest.mark.xfail()),
        pytest.param((12, 1), (-1, 32), marks=pytest.mark.xfail()),
    ],
)
def test_expand(device, input_shape, output_shape):
    m = ExpandModule()
    input_tensor = torch.rand(input_shape, dtype=torch.bfloat16)
    result_before = m.forward(input_tensor, output_shape)

    option = torch_ttnn.TorchTtnnOption(device=device)
    option.gen_graphviz = False
    # The compilation is lazy, so we need to run forward once to trigger the compilation
    m = torch.compile(m, backend=torch_ttnn.backend, options=option)

    result_after = m.forward(input_tensor, output_shape)
    option._out_fx_graphs[0].print_tabular()

    # Check that the graph has been rewritten and no longer contains the aten op
    nodes = [node.target for node in option._out_fx_graphs[0].nodes]
    assert nodes.count(torch.ops.aten.expand.default) == 0

    assert_with_pcc(result_before, result_after, pcc=0.99)
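For reference, the compile-and-verify pattern shared by both test files can also be run outside pytest. The sketch below reuses CumsumModule and assert_with_pcc from above; it assumes the device handle, which the tests receive through the device fixture, can be opened with ttnn.open_device. That device-handling detail is an assumption, not something shown in this commit.

import torch
import torch_ttnn
import ttnn

# Assumption: the tests get `device` from a fixture; here we open one directly.
device = ttnn.open_device(device_id=0)
try:
    m = CumsumModule()
    inputs = torch.rand((1, 32), dtype=torch.bfloat16)
    golden = m.forward(inputs, -1)

    option = torch_ttnn.TorchTtnnOption(device=device, gen_graphviz=False)
    m = torch.compile(m, backend=torch_ttnn.backend, options=option)
    result = m.forward(inputs, -1)  # first call triggers the lazy compilation

    assert_with_pcc(golden, result, pcc=0.99)
finally:
    ttnn.close_device(device)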