Skip to content

Commit

Permalink
#18325: cleanup global namespace pollution
Browse files Browse the repository at this point in the history
- Removed the `using namespace` directive from command_queue_interface.hpp; call sites now use fully qualified names (e.g. `tt::tt_metal::`, `ttnn::`) instead.
  • Loading branch information
nhuang-tt committed Mar 1, 2025
1 parent b309929 commit 815de5e
Show file tree
Hide file tree
Showing 478 changed files with 2,760 additions and 2,103 deletions.
14 changes: 7 additions & 7 deletions tests/tt_eager/integration_tests/test_bert.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,18 @@
#include "ttnn/operations/experimental/transformer/split_query_key_value_and_split_heads/split_query_key_value_and_split_heads.hpp"
#include "ttnn/operations/experimental/transformer/concatenate_heads/concatenate_heads.hpp"

using Parameters = std::map<std::string, Tensor>;
using Parameters = std::map<std::string, ttnn::Tensor>;
using ttnn::operations::unary::UnaryOpType;
using ttnn::operations::unary::UnaryWithParam;

MemoryConfig l1_memory_config = tt::tt_metal::MemoryConfig{
ttnn::MemoryConfig l1_memory_config = tt::tt_metal::MemoryConfig{
.memory_layout = tt::tt_metal::TensorMemoryLayout::INTERLEAVED, .buffer_type = tt::tt_metal::BufferType::L1};
MemoryConfig dram_memory_config = tt::tt_metal::MemoryConfig{
ttnn::MemoryConfig dram_memory_config = tt::tt_metal::MemoryConfig{
.memory_layout = tt::tt_metal::TensorMemoryLayout::INTERLEAVED, .buffer_type = tt::tt_metal::BufferType::DRAM};

Tensor encoder(
Tensor&& hidden_states,
const Tensor& attention_mask,
ttnn::Tensor encoder(
ttnn::Tensor&& hidden_states,
const ttnn::Tensor& attention_mask,
const Parameters& parameters,
std::size_t encoder_index,
const std::uint32_t head_size) {
Expand Down Expand Up @@ -192,7 +192,7 @@ Tensor encoder(
return feedforward_layernorm_output;
}

Tensor qa_head(Tensor&& hidden_states, const Parameters& parameters) {
ttnn::Tensor qa_head(ttnn::Tensor&& hidden_states, const Parameters& parameters) {
auto output = ttnn::operations::matmul::matmul(
hidden_states, parameters.at("qa_head_weight"), /*bias=*/std::nullopt, ttnn::operations::matmul::Matmul{});
hidden_states.deallocate();
Expand Down
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_bmm_op.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ int main(int argc, char** argv) {
ttnn::operations::matmul::Matmul{
/*program_config=*/std::nullopt,
/*bcast_batch=*/std::nullopt,
operation::DEFAULT_OUTPUT_MEMORY_CONFIG,
tt::tt_metal::operation::DEFAULT_OUTPUT_MEMORY_CONFIG,
/*output_dtype=*/std::nullopt,
/*compute_kernel_config=*/std::nullopt,
/*untilize_out=*/false,
Expand Down
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_eltwise_binary_op.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ int main() {
run_binary_ops();

// Allocate a tensor to show that the addresses aren't cached
auto input_tensor = ttnn::random::uniform(bfloat16(0.0f), bfloat16(0.0f), Shape({1, 1, 32, 32}))
auto input_tensor = ttnn::random::uniform(bfloat16(0.0f), bfloat16(0.0f), ttnn::Shape({1, 1, 32, 32}))
.to_layout(Layout::TILE)
.to_device(device);

Expand Down
4 changes: 2 additions & 2 deletions tests/tt_eager/ops/test_eltwise_unary_op.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ void test_operation_infrastructure() {
ttnn::operations::unary::operation_attributes_t op_args{
{UnaryWithParam{UnaryOpType::SQRT}},
DataType::BFLOAT16,
MemoryConfig{.memory_layout = tt::tt_metal::TensorMemoryLayout::INTERLEAVED},
tt::tt_metal::MemoryConfig{.memory_layout = tt::tt_metal::TensorMemoryLayout::INTERLEAVED},
false,
false};
ttnn::operations::unary::tensor_args_t tensor_args{input_tensor};
Expand Down Expand Up @@ -159,7 +159,7 @@ namespace tt_metal {
template <bool approx_value = false>
struct exp_with_param {
static Tensor fn(const tt::tt_metal::Tensor& t) {
return ttnn::exp(t, approx_value, operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
return ttnn::exp(t, approx_value, tt::tt_metal::operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
}
};
} // namespace tt_metal
Expand Down
11 changes: 5 additions & 6 deletions tests/tt_eager/ops/test_fold_op.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,22 +12,21 @@
#include <tt-metalium/host_api.hpp>

using namespace tt;
using namespace tt::tt_metal;
using namespace constants;

void run_fold(IDevice* device, const ttnn::Shape& shape) {
Tensor input_tensor = ttnn::random::random(shape).to_layout(Layout::ROW_MAJOR).to_device(device);
void run_fold(tt::tt_metal::IDevice* device, const ttnn::Shape& shape) {
ttnn::Tensor input_tensor = ttnn::random::random(shape).to_layout(ttnn::Layout::ROW_MAJOR).to_device(device);
uint32_t stride_h = 2;
uint32_t stride_w = 2;
Tensor device_output_tensor = ttnn::fold(ttnn::DefaultQueueId, input_tensor, stride_h, stride_w);
Tensor output_tensor = device_output_tensor.cpu();
ttnn::Tensor device_output_tensor = ttnn::fold(ttnn::DefaultQueueId, input_tensor, stride_h, stride_w);
ttnn::Tensor output_tensor = device_output_tensor.cpu();
}

int main(int argc, char** argv) {
int device_id = 0;
tt_metal::IDevice* device = tt_metal::CreateDevice(device_id);

run_fold(device, Shape({1, 2, 2, 2}));
run_fold(device, ttnn::Shape({1, 2, 2, 2}));
bool pass = CloseDevice(device);

if (pass) {
Expand Down
25 changes: 13 additions & 12 deletions tests/tt_eager/ops/test_sliding_window_ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ using namespace ttnn::operations::sliding_window;

// From owned_buffer of type bfloat16 of create float vector for convolution operation.
vector<float> create_filter_vec(
const owned_buffer::Buffer<bfloat16>& filter_tensor_buf, uint32_t filter_h, uint32_t filter_w) {
const tt::tt_metal::owned_buffer::Buffer<bfloat16>& filter_tensor_buf, uint32_t filter_h, uint32_t filter_w) {
vector<float> filter_vector;
for (auto h = 0; h < filter_h; h++) {
for (auto w = 0; w < filter_w; w++) {
Expand All @@ -32,8 +32,8 @@ vector<float> create_filter_vec(

// Compare calculated convolution buffer with Golden convolution
uint32_t compare_conv_out_with_golden(
const owned_buffer::Buffer<bfloat16>& out_golden_tensor_buf,
const owned_buffer::Buffer<bfloat16>& conv_tensor_buf) {
const tt::tt_metal::owned_buffer::Buffer<bfloat16>& out_golden_tensor_buf,
const tt::tt_metal::owned_buffer::Buffer<bfloat16>& conv_tensor_buf) {
uint32_t diff = 0;
if (out_golden_tensor_buf != conv_tensor_buf) {
assert(out_golden_tensor_buf.size() == conv_tensor_buf.size());
Expand Down Expand Up @@ -125,13 +125,13 @@ uint32_t validate_generate_halo_kernel_config(
uint32_t validate_generate_functions(
tt::tt_metal::IDevice* device,
const SlidingWindowConfig& config,
const owned_buffer::Buffer<bfloat16>& input_padded_tensor_buf,
const tt::tt_metal::owned_buffer::Buffer<bfloat16>& input_padded_tensor_buf,
const vector<float>& filter_vector,
const owned_buffer::Buffer<bfloat16>& out_golden_tensor_buf,
const tt::tt_metal::owned_buffer::Buffer<bfloat16>& out_golden_tensor_buf,
uint32_t reshard_num_cores_nhw = 0,
bool remote_read = false) {
log_debug(tt::LogTest, "Validating generate functions for config = {}", config);
owned_buffer::Buffer<bfloat16> conv_tensor_buf;
tt::tt_metal::owned_buffer::Buffer<bfloat16> conv_tensor_buf;
uint32_t diff;
uint32_t failed_tests = 0;
auto pad_metadata = generate_pad_metadata(config);
Expand Down Expand Up @@ -381,14 +381,15 @@ int main() {
ttnn::Shape filter_tensor_shape({config.window_hw.first, config.window_hw.second});

Tensor input_padded_tensor =
ttnn::random::random(input_tensor_shape, DataType::BFLOAT16).to_layout(Layout::ROW_MAJOR).cpu();
Tensor filter_tensor =
ttnn::random::random(filter_tensor_shape, DataType::BFLOAT16).to_layout(Layout::ROW_MAJOR).cpu();
auto input_padded_tensor_buf = owned_buffer::get_as<bfloat16>(input_padded_tensor);
auto filter_tensor_buf = owned_buffer::get_as<bfloat16>(filter_tensor);
ttnn::random::random(input_tensor_shape, ttnn::DataType::BFLOAT16).to_layout(ttnn::Layout::ROW_MAJOR).cpu();
Tensor filter_tensor = ttnn::random::random(filter_tensor_shape, ttnn::DataType::BFLOAT16)
.to_layout(ttnn::Layout::ROW_MAJOR)
.cpu();
auto input_padded_tensor_buf = tt::tt_metal::owned_buffer::get_as<bfloat16>(input_padded_tensor);
auto filter_tensor_buf = tt::tt_metal::owned_buffer::get_as<bfloat16>(filter_tensor);

vector<float> filter_vector = create_filter_vec(filter_tensor_buf, tc.filter_h, tc.filter_w);
owned_buffer::Buffer<bfloat16> out_golden_tensor_buf = ref_conv_op(
tt::tt_metal::owned_buffer::Buffer<bfloat16> out_golden_tensor_buf = ref_conv_op(
input_padded_tensor,
input_tensor_shape,
tc.stride_h,
Expand Down
26 changes: 16 additions & 10 deletions tests/tt_eager/tensors/test_raw_host_memory_pointer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

#include <tt-metalium/bfloat16.hpp>
#include <tt-metalium/constants.hpp>
#include "ttnn/tensor/enum_types.hpp"
#include "ttnn/tensor/host_buffer/functions.hpp"
#include "ttnn/tensor/host_buffer/types.hpp"
#include "ttnn/tensor/tensor.hpp"
Expand Down Expand Up @@ -56,6 +57,7 @@ struct NDArray {
void test_raw_host_memory_pointer() {
using tt::tt_metal::BorrowedStorage;
using tt::tt_metal::DataType;
using tt::tt_metal::Layout;
using tt::tt_metal::OwnedStorage;
using tt::tt_metal::Tensor;
using namespace tt::tt_metal::borrowed_buffer;
Expand All @@ -67,8 +69,11 @@ void test_raw_host_memory_pointer() {
ttnn::Shape shape({1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH});

// Host tensor to print the output
Tensor tensor_for_printing =
Tensor(OwnedStorage{owned_buffer::create<bfloat16>(shape.volume())}, shape, DataType::BFLOAT16, Layout::TILE);
Tensor tensor_for_printing = Tensor(
OwnedStorage{tt::tt_metal::owned_buffer::create<bfloat16>(shape.volume())},
shape,
DataType::BFLOAT16,
Layout::TILE);

/* Borrow Data from Numpy Start */
// Create some
Expand All @@ -78,7 +83,7 @@ void test_raw_host_memory_pointer() {
auto on_destruction_callback = [] {};
Tensor a_cpu = Tensor(
BorrowedStorage{
borrowed_buffer::Buffer(static_cast<bfloat16*>(a_np_array_data), a_np_array.size()),
tt::tt_metal::borrowed_buffer::Buffer(static_cast<bfloat16*>(a_np_array_data), a_np_array.size()),
on_creation_callback,
on_destruction_callback},
shape,
Expand All @@ -93,7 +98,7 @@ void test_raw_host_memory_pointer() {
// Set every value of tt Tensor to the same non-zero number
bfloat16 a_value = 4.0f;

for (auto& element : borrowed_buffer::get_as<bfloat16>(a_cpu)) {
for (auto& element : tt::tt_metal::borrowed_buffer::get_as<bfloat16>(a_cpu)) {
element = a_value;
}

Expand All @@ -113,7 +118,7 @@ void test_raw_host_memory_pointer() {

// Check that cpu tensor has correct data
bfloat16 output_value = 1.99219f; // Not exactly 2.0f because of rounding errors
for (auto& element : owned_buffer::get_as<bfloat16>(tensor_for_printing)) {
for (auto& element : tt::tt_metal::owned_buffer::get_as<bfloat16>(tensor_for_printing)) {
TT_ASSERT(element == output_value);
}

Expand All @@ -128,15 +133,16 @@ void test_raw_host_memory_pointer() {

Tensor alternative_tensor_for_printing = Tensor(
BorrowedStorage{
borrowed_buffer::Buffer(static_cast<bfloat16*>(storage_of_alternative_tensor_for_printing), shape.volume()),
tt::tt_metal::borrowed_buffer::Buffer(
static_cast<bfloat16*>(storage_of_alternative_tensor_for_printing), shape.volume()),
on_creation_callback,
on_destruction_callback},
shape,
DataType::BFLOAT16,
Layout::TILE);
alternative_tensor_for_printing.print();

for (auto& element : borrowed_buffer::get_as<bfloat16>(alternative_tensor_for_printing)) {
for (auto& element : tt::tt_metal::borrowed_buffer::get_as<bfloat16>(alternative_tensor_for_printing)) {
TT_ASSERT(element == output_value);
}

Expand All @@ -147,15 +153,15 @@ void test_raw_host_memory_pointer() {
void* d_np_array_data = d_np_array.data;
Tensor d_cpu = Tensor(
BorrowedStorage{
borrowed_buffer::Buffer(static_cast<bfloat16*>(d_np_array_data), d_np_array.size()),
tt::tt_metal::borrowed_buffer::Buffer(static_cast<bfloat16*>(d_np_array_data), d_np_array.size()),
on_creation_callback,
on_destruction_callback},
shape,
DataType::BFLOAT16,
Layout::TILE);

bfloat16 d_value = 8.0f;
for (auto& element : borrowed_buffer::get_as<bfloat16>(d_cpu)) {
for (auto& element : tt::tt_metal::borrowed_buffer::get_as<bfloat16>(d_cpu)) {
element = d_value;
}

Expand All @@ -166,7 +172,7 @@ void test_raw_host_memory_pointer() {

tt::tt_metal::memcpy(tensor_for_printing, e_dev);

for (auto& element : owned_buffer::get_as<bfloat16>(tensor_for_printing)) {
for (auto& element : tt::tt_metal::owned_buffer::get_as<bfloat16>(tensor_for_printing)) {
TT_ASSERT(element == bfloat16(10.0f));
}

Expand Down
1 change: 1 addition & 0 deletions tests/tt_metal/distributed/test_mesh_sub_device.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
namespace tt::tt_metal::distributed::test {
namespace {

using namespace tt::tt_metal;
using MeshSubDeviceTestSuite = GenericMeshDeviceFixture;

TEST_F(MeshSubDeviceTestSuite, SyncWorkloadsOnSubDevice) {
Expand Down
2 changes: 2 additions & 0 deletions tests/tt_metal/test_utils/test_common.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
#include <sstream>
#include "metal_soc_descriptor.h"

using namespace tt::tt_metal; // test only

namespace test_args {

template <class T>
Expand Down
4 changes: 2 additions & 2 deletions tests/tt_metal/tt_fabric/common/fabric_fixture.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,8 @@ class ControlPlaneFixture : public ::testing::Test {
class FabricFixture : public ::testing::Test {
protected:
tt::ARCH arch_;
std::map<chip_id_t, IDevice*> devices_map_;
std::vector<IDevice*> devices_;
std::map<chip_id_t, tt::tt_metal::IDevice*> devices_map_;
std::vector<tt::tt_metal::IDevice*> devices_;
bool slow_dispatch_;

void SetUp() override {
Expand Down
Loading

0 comments on commit 815de5e

Please sign in to comment.