From 67fdffbc605a166734eeb1a90667040e16bfce43 Mon Sep 17 00:00:00 2001
From: Aswinmcw
Date: Mon, 21 Oct 2024 14:27:22 +0000
Subject: [PATCH] #0: Report passed values

---
 .../operations/ccl/all_gather/device/all_gather_op.cpp | 8 ++++----
 .../ccl/reduce_scatter/device/reduce_scatter_op.cpp    | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/ttnn/cpp/ttnn/operations/ccl/all_gather/device/all_gather_op.cpp b/ttnn/cpp/ttnn/operations/ccl/all_gather/device/all_gather_op.cpp
index 5ff63c4b75e2..e7a0fd0f9bd7 100644
--- a/ttnn/cpp/ttnn/operations/ccl/all_gather/device/all_gather_op.cpp
+++ b/ttnn/cpp/ttnn/operations/ccl/all_gather/device/all_gather_op.cpp
@@ -52,7 +52,7 @@ AllGather create_all_gather_struct(
             break;
         }
         default:
-            TT_FATAL(false, "Invalid Topology, Accepted topologies are Ring and Linear currently");
+            TT_FATAL(false, "Invalid topology {}. Accepted topologies are currently Ring and Linear", topology);
     }
     break;
 }
@@ -143,7 +143,7 @@ AllGatherConfig::AllGatherConfig(Tensor const& input_tensor, Tensor const& outpu
 void AllGather::validate(const std::vector<Tensor> &input_tensors) const {
-    TT_FATAL(input_tensors.size() == 1, "Error, Input tensor size should be 1");
+    TT_FATAL(input_tensors.size() == 1, "Error, Input tensor size should be 1 but is {}", input_tensors.size());
     const auto& input_tensor = input_tensors[0];
     const auto& layout = input_tensors[0].get_layout();
     const auto& dtype = input_tensors[0].get_dtype();
@@ -155,9 +155,9 @@ void AllGather::validate(const std::vector<Tensor> &input_tensors) const {
     // TODO: Validate ring
     TT_FATAL(input_tensor.storage_type() == StorageType::DEVICE, "Operands to all_gather need to be on device!");
     TT_FATAL(input_tensor.buffer() != nullptr , "Operands to all_gather need to be allocated in buffers on device!");
-    TT_FATAL(this->num_links > 0, "Error, num_links should be more than 0");
+    TT_FATAL(this->num_links > 0, "Error, num_links should be more than 0 but is {}", this->num_links);
     TT_FATAL(this->num_links <= input_tensor.device()->compute_with_storage_grid_size().y, "Worker cores used by links are parallelizaed over rows");
-    TT_FATAL(this->receiver_device_id.has_value() || this->sender_device_id.has_value(), "Error, Either receiver or sender device needs to have some value");
+    TT_FATAL(this->receiver_device_id.has_value() || this->sender_device_id.has_value(), "Error, All-gather was unable to identify either a sender or receiver device ID, and at least one must be identified for a valid all-gather configuration. The input mesh tensor or all-gather arguments may be incorrect");
     TT_FATAL(input_tensor.memory_config().memory_layout == TensorMemoryLayout::INTERLEAVED ||
         input_tensor.memory_config().memory_layout == TensorMemoryLayout::WIDTH_SHARDED ||
diff --git a/ttnn/cpp/ttnn/operations/ccl/reduce_scatter/device/reduce_scatter_op.cpp b/ttnn/cpp/ttnn/operations/ccl/reduce_scatter/device/reduce_scatter_op.cpp
index fde0bef4647d..2c87dd4dd000 100644
--- a/ttnn/cpp/ttnn/operations/ccl/reduce_scatter/device/reduce_scatter_op.cpp
+++ b/ttnn/cpp/ttnn/operations/ccl/reduce_scatter/device/reduce_scatter_op.cpp
@@ -110,7 +110,7 @@ Tensor reduce_scatter(
             break;
         }
     }
-    TT_FATAL(receiver_device_id != std::nullopt || sender_device_id != std::nullopt, "Error in reduce scatter op setup, Either receiver or sender device needs to have some value");
+    TT_FATAL(receiver_device_id != std::nullopt || sender_device_id != std::nullopt, "Error, Reduce-scatter was unable to identify either a sender or receiver device ID, and at least one must be identified for a valid reduce-scatter configuration. The input mesh tensor or reduce-scatter arguments may be incorrect");
     return operation::run(
         ttnn::ReduceScatter{