diff --git a/ttnn/cpp/ttnn/tensor/tensor.cpp b/ttnn/cpp/ttnn/tensor/tensor.cpp
index 7ec04d88e6e..d689237d5e9 100644
--- a/ttnn/cpp/ttnn/tensor/tensor.cpp
+++ b/ttnn/cpp/ttnn/tensor/tensor.cpp
@@ -339,8 +339,8 @@ void Tensor::deallocate(bool force) {
                 });
 
                 for (auto worker : this->workers) {
-                    worker->push_work(std::make_shared<std::function<void()>>(
-                        [worker, dealloc_lambda]() mutable { (*dealloc_lambda)(worker); }));
+                    worker->push_work(
+                        [worker, dealloc_lambda]() mutable { (*dealloc_lambda)(worker); });
                 }
             }
         } else {
diff --git a/ttnn/cpp/ttnn/tensor/tensor_ops.cpp b/ttnn/cpp/ttnn/tensor/tensor_ops.cpp
index c8c9e0bd782..e63a37fb791 100644
--- a/ttnn/cpp/ttnn/tensor/tensor_ops.cpp
+++ b/ttnn/cpp/ttnn/tensor/tensor_ops.cpp
@@ -26,9 +26,8 @@ namespace{
 
 inline void SynchronizeWorkerThreads(const std::vector<Device*>& workers) {
     // Push empty work to threads and ensure its been picked up
-    static auto empty_work = std::make_shared<std::function<void()>>([](){});
     for (auto target_device : workers) {
-        target_device->work_executor.push_work(empty_work);
+        target_device->work_executor.push_work([](){});
     }
     // Block until work has been picked up, to flush the queue
     for (auto target_device : workers) {
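
Both hunks follow the same pattern: push_work() is now given the callable directly, so call sites can drop the std::make_shared<std::function<void()>> wrapper (and the static empty_work cache in SynchronizeWorkerThreads becomes unnecessary). The snippet below is a minimal, self-contained sketch of that calling-convention change, not tt-metal's actual WorkExecutor; MockWorkExecutor and its internals are hypothetical stand-ins used only to contrast the old and new call sites.

// Hypothetical stand-in for illustration only -- not tt-metal's WorkExecutor.
#include <functional>
#include <iostream>
#include <memory>
#include <queue>
#include <utility>

struct MockWorkExecutor {
    std::queue<std::function<void()>> work_queue;

    // Old-style entry point: the caller heap-allocates a shared_ptr-wrapped
    // std::function, and the executor dereferences it when the task runs.
    void push_work(std::shared_ptr<std::function<void()>> work) {
        work_queue.push([work]() { (*work)(); });
    }

    // New-style entry point: the caller passes any callable directly, and the
    // executor type-erases it into std::function itself.
    template <typename F>
    void push_work(F&& work) {
        work_queue.push(std::function<void()>(std::forward<F>(work)));
    }

    // Drain the queue, running each queued task in order.
    void run_all() {
        while (!work_queue.empty()) {
            work_queue.front()();
            work_queue.pop();
        }
    }
};

int main() {
    MockWorkExecutor executor;

    // Call site before the diff: explicit std::make_shared<std::function<void()>> wrapper.
    executor.push_work(std::make_shared<std::function<void()>>(
        []() { std::cout << "old-style task\n"; }));

    // Call site after the diff: the lambda is handed over as-is.
    executor.push_work([]() { std::cout << "new-style task\n"; });

    executor.run_all();
    return 0;
}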