Skip to content

Commit

Permalink
#10643: Remove tt eager bindings
Browse files Browse the repository at this point in the history
  • Loading branch information
VirdhatchaniKN committed Jul 24, 2024
1 parent c0ef0dc commit d40e3cf
Show file tree
Hide file tree
Showing 12 changed files with 43 additions and 397 deletions.
16 changes: 0 additions & 16 deletions docs/source/ttnn/ttnn/dependencies/tt_lib.rst
Original file line number Diff line number Diff line change
Expand Up @@ -276,28 +276,20 @@ Tensor elementwise operations

.. autofunction:: tt_lib.tensor.div_no_nan

.. autofunction:: tt_lib.tensor.log1p

.. autofunction:: tt_lib.tensor.clip

.. autofunction:: tt_lib.tensor.hardtanh

.. autofunction:: tt_lib.tensor.cbrt

.. autofunction:: tt_lib.tensor.hypot

.. autofunction:: tt_lib.tensor.mish

.. autofunction:: tt_lib.tensor.polyval

.. autofunction:: tt_lib.tensor.mac

.. autofunction:: tt_lib.tensor.hardsigmoid

.. autofunction:: tt_lib.tensor.swish

.. autofunction:: tt_lib.tensor.hardswish

.. autofunction:: tt_lib.tensor.softsign

.. autofunction:: tt_lib.tensor.softshrink
Expand All @@ -318,8 +310,6 @@ Tensor elementwise operations

.. autofunction:: tt_lib.tensor.atan2

.. autofunction:: tt_lib.tensor.logical_xor

.. autofunction:: tt_lib.tensor.logical_xori

.. autofunction:: tt_lib.tensor.subalpha
Expand All @@ -332,16 +322,10 @@ Tensor elementwise operations

.. autofunction:: tt_lib.tensor.logit

.. autofunction:: tt_lib.tensor.lgamma

.. autofunction:: tt_lib.tensor.logical_andi

.. autofunction:: tt_lib.tensor.multigammaln

.. autofunction:: tt_lib.tensor.assign

.. autofunction:: tt_lib.tensor.isclose

.. autofunction:: tt_lib.tensor.digamma

.. autofunction:: tt_lib.tensor.logical_ori
Expand Down
2 changes: 1 addition & 1 deletion models/experimental/ssd/tt/ssd_mobilenetv3_convlayer.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from models.experimental.ssd.ssd_utils import create_batchnorm

ACT_FN_1 = ttnn.relu
ACT_FN_2 = tt_lib.tensor.hardswish
ACT_FN_2 = ttnn.hardswish


class TtMobileNetV3ConvLayer(nn.Module):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ def __init__(
)

self.activation = ttnn.relu
self.scale_activation = tt_lib.tensor.hardsigmoid
self.scale_activation = ttnn.hardsigmoid

def forward(self, input: tt_lib.tensor.Tensor) -> tt_lib.tensor.Tensor:
scale = self.avgpool(input)
Expand Down
16 changes: 5 additions & 11 deletions models/experimental/vovnet/tt/effective_se_module.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
tt_to_torch_tensor,
torch_to_tt_tensor_rm,
)

import ttnn
import tt_lib
from tt_lib import fallback_ops

Expand All @@ -34,12 +34,8 @@ def __init__(
self.device = device
self.base_address = base_address

conv_weight = torch_to_tt_tensor_rm(
state_dict[f"{base_address}.fc.weight"], self.device, put_on_device=False
)
conv_bias = torch_to_tt_tensor_rm(
state_dict[f"{base_address}.fc.bias"], self.device, put_on_device=False
)
conv_weight = torch_to_tt_tensor_rm(state_dict[f"{base_address}.fc.weight"], self.device, put_on_device=False)
conv_bias = torch_to_tt_tensor_rm(state_dict[f"{base_address}.fc.bias"], self.device, put_on_device=False)

self.fc = fallback_ops.Conv2d(
weights=conv_weight,
Expand All @@ -53,7 +49,7 @@ def __init__(
padding_mode="zeros",
)

self.activation = tt_lib.tensor.hardsigmoid
self.activation = ttnn.hardsigmoid

def forward(self, input: tt_lib.tensor.Tensor) -> tt_lib.tensor.Tensor:
out = tt_to_torch_tensor(input)
Expand All @@ -64,7 +60,5 @@ def forward(self, input: tt_lib.tensor.Tensor) -> tt_lib.tensor.Tensor:
out = torch_to_tt_tensor_rm(out, self.device, put_on_device=False)
out = self.fc(out)
out = self.activation(out)
out = tt_lib.tensor.bcast(
input, out, tt_lib.tensor.BcastOpMath.MUL, tt_lib.tensor.BcastOpDim.HW
)
out = tt_lib.tensor.bcast(input, out, tt_lib.tensor.BcastOpMath.MUL, tt_lib.tensor.BcastOpDim.HW)
return out
34 changes: 28 additions & 6 deletions tests/tt_eager/python_api_testing/sweep_tests/tt_lib_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -1319,7 +1319,7 @@ def eltwise_isclose(
):
t0 = setup_tt_tensor(x, device, layout[0], input_mem_config[0], dtype[0])
t1 = setup_tt_tensor(y, device, layout[1], input_mem_config[1], dtype[1])
t2 = ttl.tensor.isclose(t0, t1, rtol, atol, equal_nan, output_mem_config=output_mem_config)
t2 = ttnn.isclose(t0, t1, rtol, atol, equal_nan, memory_config=output_mem_config)

return tt2torch_tensor(t2)

Expand Down Expand Up @@ -2348,6 +2348,28 @@ def unary_op(
return unary_op


def make_ttnn_unary_op(ttl_tensor_unop):
    """Wrap a ttnn elementwise unary op for the sweep-test harness.

    Returns a `setup_host_and_device`-decorated callable that converts the
    torch input to a tt tensor, applies *ttl_tensor_unop* with the ttnn-style
    ``memory_config=`` keyword (tt_lib ops used ``output_mem_config=``), and
    converts the result back to a torch tensor.
    """

    @setup_host_and_device
    def unary_op(
        x,
        *args,
        device,
        dtype,
        layout,
        input_mem_config,
        output_mem_config=ttl.tensor.MemoryConfig(
            ttl.tensor.TensorMemoryLayout.INTERLEAVED, ttl.tensor.BufferType.DRAM
        ),
        **kwargs,
    ):
        # Only the first entry of each per-input config list applies: one input.
        tt_input = setup_tt_tensor(x, device, layout[0], input_mem_config[0], dtype[0])
        tt_output = ttl_tensor_unop(tt_input, memory_config=output_mem_config)
        return tt2torch_tensor(tt_output)

    return unary_op


def make_unary_op_optional_output(ttl_tensor_unop):
@setup_host_and_device
def unary_op_optional_output(
Expand Down Expand Up @@ -2467,8 +2489,8 @@ def unary_op_optional_output_with_fast_approx(
eltwise_asinh = make_unary_op(ttl.tensor.asinh)
eltwise_acosh = make_unary_op(ttl.tensor.acosh)
eltwise_tanhshrink = make_unary_op(ttl.tensor.tanhshrink)
eltwise_lgamma = make_unary_op(ttl.tensor.lgamma)
eltwise_multigammaln = make_unary_op(ttl.tensor.multigammaln)
eltwise_lgamma = make_ttnn_unary_op(ttnn.lgamma)
eltwise_multigammaln = make_ttnn_unary_op(ttnn.multigammaln)
eltwise_softsign = make_unary_op(ttl.tensor.softsign)
eltwise_relu = make_unary_op_optional_output(ttnn.relu)
eltwise_relu6 = make_unary_op_optional_output(ttnn.relu6)
Expand All @@ -2487,10 +2509,10 @@ def unary_op_optional_output_with_fast_approx(
eltwise_sigmoid_accurate = make_unary_op_optional_output(ttnn.sigmoid_accurate)
eltwise_log_sigmoid = make_unary_op_optional_output(ttnn.log_sigmoid)
eltwise_swish = make_unary_op(ttl.tensor.swish)
eltwise_log1p = make_unary_op(ttl.tensor.log1p)
eltwise_log1p = make_ttnn_unary_op(ttnn.log1p)
eltwise_mish = make_unary_op(ttl.tensor.mish)
eltwise_hardswish = make_unary_op(ttl.tensor.hardswish)
eltwise_hardsigmoid = make_unary_op(ttl.tensor.hardsigmoid)
eltwise_hardswish = make_ttnn_unary_op(ttnn.hardswish)
eltwise_hardsigmoid = make_ttnn_unary_op(ttnn.hardsigmoid)
eltwise_digamma = make_unary_op(ttl.tensor.digamma)
eltwise_silu = make_unary_op_optional_output(ttnn.silu)
eltwise_square = make_unary_op_optional_output(ttnn.square)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
#include "ttnn/operations/eltwise/unary/unary.hpp"
#include "ttnn/operations/eltwise/complex_unary/device/complex_unary_op.hpp"
#include "ttnn/operations/eltwise/complex_binary/device/complex_binary_op.hpp"
#include "ttnn/operations/eltwise/binary/device/binary_composite_op.hpp"

namespace tt {

Expand Down Expand Up @@ -90,7 +91,7 @@ Tensor complex_abs(const Tensor& input, const MemoryConfig& output_mem_config) {
CHECK_FOR_COMPLEX(input);
Tensor real = get_real(input, output_mem_config);
Tensor imag = get_imag(input, output_mem_config);
return hypot(real, imag, output_mem_config);
return ttnn::operations::binary::_hypot(real, imag, output_mem_config);
}

Tensor complex_recip(const Tensor& input, const MemoryConfig& output_mem_config) {
Expand Down
Loading

0 comments on commit d40e3cf

Please sign in to comment.