Add torch.ops.aten.cumsum.default lowering to ttnn.moreh_cumsum
Po-Sheng Chang authored and jerrysky3 committed Nov 1, 2024
1 parent 19b7dbe commit c84d65d
Showing 3 changed files with 50 additions and 0 deletions.
42 changes: 42 additions & 0 deletions tests/lowering/misc/test_cumsum.py
@@ -0,0 +1,42 @@
import torch
import torch_ttnn
import pytest
import ttnn

from tests.utils import assert_with_pcc


class CumsumModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input, dim):
        return torch.ops.aten.cumsum.default(input, dim=dim)


@pytest.mark.parametrize(
    "input_shapes, dim",
    [
        ((1, 32), 1),
    ],
)
def test_cumsum(device, input_shapes, dim):
    m = CumsumModule()
    inputs = torch.rand(input_shapes, dtype=torch.bfloat16)
    result_before = m.forward(inputs, dim)

    option = torch_ttnn.TorchTtnnOption(device=device)
    option.gen_graphviz = False

    # Compilation is lazy, so run forward once to trigger it
    m = torch.compile(m, backend=torch_ttnn.backend, options=option)

    result_after = m.forward(inputs, dim)
    option._out_fx_graphs[0].print_tabular()

    # Check that the graph has been rewritten and contains ttnn ops
    nodes = [node.target for node in option._out_fx_graphs[0].nodes]
    assert nodes.count(ttnn.moreh_cumsum) == 1

    # Check the inference result
    assert_with_pcc(result_before, result_after, pcc=0.99)
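
For reference, `torch.ops.aten.cumsum.default` is the ATen op behind the public `torch.cumsum` API: it computes a running sum along one dimension. A quick eager-mode illustration, not part of the commit:

import torch

x = torch.tensor([[1.0, 2.0, 3.0]], dtype=torch.bfloat16)
# Running sum along dim=1: [[1., 3., 6.]]
print(torch.ops.aten.cumsum.default(x, dim=1))
print(torch.cumsum(x, dim=1))  # same result via the public API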
1 change: 1 addition & 0 deletions torch_ttnn/passes/lowering/add_data_move_pass.py
@@ -177,6 +177,7 @@ def is_tt_compute(node) -> bool:
        ttnn.squeeze,
        ttnn.full,
        ttnn.as_tensor,
        ttnn.moreh_cumsum,
    ]
)
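
For context, `is_tt_compute` is consulted by the data-move pass to decide which nodes need their tensors moved to the device. A minimal sketch of that membership test (the registry name and its full contents here are assumptions, not verbatim from the file):

import ttnn

# Illustrative only: the real registry in add_data_move_pass.py is larger
TTNN_COMPUTE_OPS = {
    ttnn.squeeze,
    ttnn.full,
    ttnn.as_tensor,
    ttnn.moreh_cumsum,  # registered by this commit
}

def is_tt_compute(node) -> bool:
    # A node is a TTNN compute op if its call target is in the registry
    return node.target in TTNN_COMPUTE_OPS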

7 changes: 7 additions & 0 deletions torch_ttnn/passes/lowering/to_tt_pass.py
@@ -756,6 +756,13 @@ def rewrite_node(node):
        else:
            return None

        if node.target == torch.ops.aten.cumsum.default:
            tensor, dim = args
            input_shape = tensor.meta["val"].size()
            rank = len(input_shape)
            dim = (dim + rank) % rank
            return g.call_function(ttnn.moreh_cumsum, (tensor, dim), kwargs)

    with g.inserting_before(node):
        new_node = rewrite_node(node)
        if new_node is not None:
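
The `(dim + rank) % rank` step maps PyTorch's negative dim convention (e.g. `dim=-1` for the last dimension) onto a non-negative index before calling `ttnn.moreh_cumsum`. A standalone check of that arithmetic (hypothetical helper, not part of the commit):

def normalize_dim(dim: int, rank: int) -> int:
    # Map a possibly negative PyTorch dim into the range [0, rank)
    return (dim + rank) % rank

assert normalize_dim(-1, 2) == 1  # last dim of a rank-2 tensor
assert normalize_dim(-2, 4) == 2
assert normalize_dim(1, 2) == 1   # non-negative dims pass through unchanged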
