From fd0dc12ab497fe2ffa00964eb3ddee563e9e25f9 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 15 Jan 2025 20:38:28 +0000 Subject: [PATCH 01/40] Added ModuleCacher to cache modules using Action hooks. --- include/ttmlir/RegisterAll.h | 2 ++ lib/RegisterAll.cpp | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/include/ttmlir/RegisterAll.h b/include/ttmlir/RegisterAll.h index 5890299cc2..9be631a94f 100644 --- a/include/ttmlir/RegisterAll.h +++ b/include/ttmlir/RegisterAll.h @@ -17,6 +17,8 @@ void registerAllDialects(mlir::DialectRegistry ®istry); void registerAllExtensions(mlir::DialectRegistry ®istry); void registerAllPasses(); +struct MLIRModuleCacher; + } // namespace mlir::tt #endif diff --git a/lib/RegisterAll.cpp b/lib/RegisterAll.cpp index db8636f07c..4e0fe5cc87 100644 --- a/lib/RegisterAll.cpp +++ b/lib/RegisterAll.cpp @@ -5,7 +5,9 @@ #include "ttmlir/RegisterAll.h" #include "mlir/Dialect/Func/Extensions/InlinerExtension.h" +#include "mlir/IR/Action.h" #include "mlir/IR/DialectRegistry.h" +#include "mlir/IR/MLIRContext.h" #include "mlir/InitAllDialects.h" #include "mlir/InitAllPasses.h" #include "ttmlir/Conversion/Passes.h" @@ -77,3 +79,27 @@ void mlir::tt::registerAllPasses() { mlir::tt::ttnn::registerTTNNPipelines(); mlir::tt::ttmetal::registerTTMetalPipelines(); } + +// Create a larger MLIRModuleCache object which initializes the hook to cache +// provided MLIRContext + +struct mlir::tt::MLIRModuleCacher { + mlir::MLIRContext *context; + llvm::StringMap moduleCache; + + void attachContext(mlir::MLIRContext *ctx) { + context = ctx; + + context->registerActionHandler([this](llvm::function_ref transform, + const mlir::tracing::Action &action) { + if (mlir::isa(action)) { + auto passAction = mlir::cast(action); + // A Pass action has occured, need to store the previous module before + // transform is completed. + this->moduleCache[passAction.getPass().getName().str()] = + passAction.getOp(); + } + transform(); // Run the transformation pass. + }); + } +}; From 435d8698e432aeb0b9eeef6405996bda6e14bf6b Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 15 Jan 2025 21:46:26 +0000 Subject: [PATCH 02/40] Moved to Conversion/Passes.h --- include/ttmlir/Conversion/Passes.h | 21 +++++++++++++++++++++ include/ttmlir/RegisterAll.h | 2 -- lib/RegisterAll.cpp | 24 ------------------------ 3 files changed, 21 insertions(+), 26 deletions(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index 4c7464d889..c2b2200ecc 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -26,6 +26,27 @@ namespace mlir::tt { #define GEN_PASS_REGISTRATION #include "ttmlir/Conversion/Passes.h.inc" +struct MLIRModuleCacher { + mlir::MLIRContext *context; + llvm::StringMap moduleCache; + + void attachContext(mlir::MLIRContext *ctx) { + context = ctx; + + context->registerActionHandler([this](llvm::function_ref transform, + const mlir::tracing::Action &action) { + if (mlir::isa(action)) { + auto passAction = mlir::cast(action); + // A Pass action has occured, need to store the previous module before + // transform is completed. + this->moduleCache[passAction.getPass().getName().str()] = + passAction.getOp(); + } + transform(); // Run the transformation pass. + }); + } +}; + } // namespace mlir::tt #endif // TTMLIR_CONVERSION_PASSES_H diff --git a/include/ttmlir/RegisterAll.h b/include/ttmlir/RegisterAll.h index 9be631a94f..5890299cc2 100644 --- a/include/ttmlir/RegisterAll.h +++ b/include/ttmlir/RegisterAll.h @@ -17,8 +17,6 @@ void registerAllDialects(mlir::DialectRegistry ®istry); void registerAllExtensions(mlir::DialectRegistry ®istry); void registerAllPasses(); -struct MLIRModuleCacher; - } // namespace mlir::tt #endif diff --git a/lib/RegisterAll.cpp b/lib/RegisterAll.cpp index 4e0fe5cc87..d4f9bea202 100644 --- a/lib/RegisterAll.cpp +++ b/lib/RegisterAll.cpp @@ -79,27 +79,3 @@ void mlir::tt::registerAllPasses() { mlir::tt::ttnn::registerTTNNPipelines(); mlir::tt::ttmetal::registerTTMetalPipelines(); } - -// Create a larger MLIRModuleCache object which initializes the hook to cache -// provided MLIRContext - -struct mlir::tt::MLIRModuleCacher { - mlir::MLIRContext *context; - llvm::StringMap moduleCache; - - void attachContext(mlir::MLIRContext *ctx) { - context = ctx; - - context->registerActionHandler([this](llvm::function_ref transform, - const mlir::tracing::Action &action) { - if (mlir::isa(action)) { - auto passAction = mlir::cast(action); - // A Pass action has occured, need to store the previous module before - // transform is completed. - this->moduleCache[passAction.getPass().getName().str()] = - passAction.getOp(); - } - transform(); // Run the transformation pass. - }); - } -}; From 25a0bfdc50d988b66672b40562f1f75b610d0e85 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 16 Jan 2025 17:55:33 +0000 Subject: [PATCH 03/40] Clone Op --- include/ttmlir/Conversion/Passes.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index c2b2200ecc..a59c3a4c26 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -40,7 +40,7 @@ struct MLIRModuleCacher { // A Pass action has occured, need to store the previous module before // transform is completed. this->moduleCache[passAction.getPass().getName().str()] = - passAction.getOp(); + passAction.getOp()->clone(); } transform(); // Run the transformation pass. }); From b119b72022c0a9c4498ba2fd6987376080a5a91b Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 16 Jan 2025 19:52:34 +0000 Subject: [PATCH 04/40] Switched to strings instead of Op Clones --- include/ttmlir/Conversion/Passes.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index a59c3a4c26..8d6af63fb1 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -28,7 +28,7 @@ namespace mlir::tt { struct MLIRModuleCacher { mlir::MLIRContext *context; - llvm::StringMap moduleCache; + llvm::StringMap moduleCache; void attachContext(mlir::MLIRContext *ctx) { context = ctx; @@ -39,8 +39,15 @@ struct MLIRModuleCacher { auto passAction = mlir::cast(action); // A Pass action has occured, need to store the previous module before // transform is completed. - this->moduleCache[passAction.getPass().getName().str()] = - passAction.getOp()->clone(); + + std::string outString; + llvm::raw_string_ostream os(outString); + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(); + passAction.getOp()->print(os, flags); + os.flush(); + + this->moduleCache[passAction.getPass().getName().str()] = outString; } transform(); // Run the transformation pass. }); From ce1294db83fd285783ab1e4767fbfb09285a36b4 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 16 Jan 2025 20:21:42 +0000 Subject: [PATCH 05/40] Moved from map to vector --- include/ttmlir/Conversion/Passes.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index 8d6af63fb1..1bc29b751f 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -28,7 +28,8 @@ namespace mlir::tt { struct MLIRModuleCacher { mlir::MLIRContext *context; - llvm::StringMap moduleCache; + std::vector> moduleCache; + // llvm::StringMap moduleCache; void attachContext(mlir::MLIRContext *ctx) { context = ctx; @@ -47,7 +48,9 @@ struct MLIRModuleCacher { passAction.getOp()->print(os, flags); os.flush(); - this->moduleCache[passAction.getPass().getName().str()] = outString; + this->moduleCache.emplace_back(passAction.getPass().getName().str(), + outString); + //[passAction.getPass().getName().str()] = outString; } transform(); // Run the transformation pass. }); From 693e5a992c33566a336bbb12e5ab8f744a525904 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 17 Jan 2025 19:49:28 +0000 Subject: [PATCH 06/40] Added ModuleCache to FB Schema --- include/ttmlir/Conversion/Passes.h | 49 ++++++++++--------- include/ttmlir/Target/Common/debug_info.fbs | 10 ++++ include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h | 9 ++-- lib/Target/TTNN/TTNNToFlatbuffer.cpp | 25 ++++++++-- 4 files changed, 62 insertions(+), 31 deletions(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index 1bc29b751f..1c73fc6712 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -29,31 +29,36 @@ namespace mlir::tt { struct MLIRModuleCacher { mlir::MLIRContext *context; std::vector> moduleCache; - // llvm::StringMap moduleCache; - void attachContext(mlir::MLIRContext *ctx) { + void attachContext(mlir::MLIRContext *ctx, + std::vector passNamesToCache = {}) { context = ctx; - context->registerActionHandler([this](llvm::function_ref transform, - const mlir::tracing::Action &action) { - if (mlir::isa(action)) { - auto passAction = mlir::cast(action); - // A Pass action has occured, need to store the previous module before - // transform is completed. - - std::string outString; - llvm::raw_string_ostream os(outString); - mlir::OpPrintingFlags flags; - flags.enableDebugInfo(); - passAction.getOp()->print(os, flags); - os.flush(); - - this->moduleCache.emplace_back(passAction.getPass().getName().str(), - outString); - //[passAction.getPass().getName().str()] = outString; - } - transform(); // Run the transformation pass. - }); + context->registerActionHandler( + [this, passNamesToCache](llvm::function_ref transform, + const mlir::tracing::Action &action) { + if (mlir::isa(action)) { + auto passAction = mlir::cast(action); + // A Pass action has occured, need to store the previous module + // before transform is completed. + + std::string outString; + llvm::raw_string_ostream os(outString); + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(); + passAction.getOp()->print(os, flags); + os.flush(); + + std::string passName = passAction.getPass().getName().str(); + + if (not passNamesToCache.empty() and + std::find(passNamesToCache.begin(), passNamesToCache.end(), + passName) != passNamesToCache.end()) { + this->moduleCache.emplace_back(passName, outString); + } + } + transform(); // Run the transformation pass. + }); } }; diff --git a/include/ttmlir/Target/Common/debug_info.fbs b/include/ttmlir/Target/Common/debug_info.fbs index 468fb24f1a..1e1ee829cc 100644 --- a/include/ttmlir/Target/Common/debug_info.fbs +++ b/include/ttmlir/Target/Common/debug_info.fbs @@ -24,8 +24,18 @@ table MLIR { source: string; } +table ModuleCacheItem { + pass_name: string; + module: string; +} + +table ModuleCache { + module_cache: [ModuleCacheItem]; +} + table DebugInfo { mlir: MLIR; cpp: string; golden_info: GoldenInfo; + module_cache: ModuleCache; } diff --git a/include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h b/include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h index 77cc45cfa5..360c52aa0b 100644 --- a/include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h +++ b/include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h @@ -12,16 +12,17 @@ namespace mlir::tt::ttnn { // Convert a TTNNIR operation to a flatbuffer -std::shared_ptr -ttnnToFlatbuffer(Operation *op, - std::unordered_map goldenMap = {}); +std::shared_ptr ttnnToFlatbuffer( + Operation *op, std::unordered_map goldenMap = {}, + std::vector> moduleCache = {}); // Convert a TTNNIR operation to a flatbuffer // This function signature is required in order to register the conversion in // mlir translation framework LogicalResult translateTTNNToFlatbuffer( Operation *op, llvm::raw_ostream &os, - std::unordered_map goldenMap = {}); + std::unordered_map goldenMap = {}, + std::vector> moduleCache = {}); } // namespace mlir::tt::ttnn #endif diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index 1709443113..d14d98eb19 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -1224,7 +1224,8 @@ emitTTNNOperation(FlatbufferObjectCache &cache, Operation *op, std::shared_ptr ttnnToFlatbuffer(Operation *op, - std::unordered_map goldenMap) { + std::unordered_map goldenMap, + std::vector> moduleCache) { ModuleOp module = dyn_cast(op); assert(module && "Expected ModuleOp as top level operation"); @@ -1258,9 +1259,22 @@ ttnnToFlatbuffer(Operation *op, goldenKVList.push_back(goldenKV); } + // Load the ModuleCache if present and populate DebugInfo + std::vector<::flatbuffers::Offset<::tt::target::ModuleCacheItem>> + moduleCacheList; + moduleCacheList.reserve(moduleCache.size()); + + for (const auto &item : moduleCache) { + auto moduleCacheItem = ::tt::target::CreateModuleCacheItemDirect( + fbb, item.first.c_str(), item.second.c_str()); + moduleCacheList.push_back(moduleCacheItem); + } + + auto moduleCacheFbb = + ::tt::target::CreateModuleCacheDirect(fbb, &moduleCacheList); auto goldenInfo = ::tt::target::CreateGoldenInfoDirect(fbb, &goldenKVList); - auto debugInfo = - ::tt::target::CreateDebugInfoDirect(fbb, mlir, cpp.c_str(), goldenInfo); + auto debugInfo = ::tt::target::CreateDebugInfoDirect( + fbb, mlir, cpp.c_str(), goldenInfo, moduleCacheFbb); std::vector<::flatbuffers::Offset<::tt::target::ttnn::Program>> programs; module->walk([&](func::FuncOp func) { @@ -1290,8 +1304,9 @@ ttnnToFlatbuffer(Operation *op, LogicalResult translateTTNNToFlatbuffer( Operation *op, llvm::raw_ostream &os, - std::unordered_map goldenMap) { - std::shared_ptr data = ttnnToFlatbuffer(op, goldenMap); + std::unordered_map goldenMap, + std::vector> moduleCache) { + std::shared_ptr data = ttnnToFlatbuffer(op, goldenMap, moduleCache); std::size_t size = ::flatbuffers::GetSizePrefixedBufferLength( static_cast(data.get())); os.write(reinterpret_cast(data.get()), size); From 2ca9747ff47cc4ebbcf290a7d4ef15dc6eb4ceef Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 17 Jan 2025 20:18:27 +0000 Subject: [PATCH 07/40] Debugging ModuleCacher --- include/ttmlir/Conversion/Passes.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index 1c73fc6712..6bc712ada5 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -56,6 +56,8 @@ struct MLIRModuleCacher { passName) != passNamesToCache.end()) { this->moduleCache.emplace_back(passName, outString); } + } else { + this->moduleCache.emplace_back(action.getTag().str(), ""); } transform(); // Run the transformation pass. }); From b71cb5f9ce1266e8981b9abb1a79636f1665e809 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 17 Jan 2025 20:26:50 +0000 Subject: [PATCH 08/40] Stupid bug fix --- include/ttmlir/Conversion/Passes.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index 6bc712ada5..452bd80345 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -55,9 +55,9 @@ struct MLIRModuleCacher { std::find(passNamesToCache.begin(), passNamesToCache.end(), passName) != passNamesToCache.end()) { this->moduleCache.emplace_back(passName, outString); + } else if (passNamesToCache.empty()) { + this->moduleCache.emplace_back(passName, outString); } - } else { - this->moduleCache.emplace_back(action.getTag().str(), ""); } transform(); // Run the transformation pass. }); From df7062a63826ad46d557f74c63b287b9f9f86137 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Tue, 21 Jan 2025 16:17:46 +0000 Subject: [PATCH 09/40] Interim --- .../tt_adapter/src/tt_adapter/utils.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/tools/explorer/tt_adapter/src/tt_adapter/utils.py b/tools/explorer/tt_adapter/src/tt_adapter/utils.py index 9d69344bea..ae473f7d00 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/utils.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/utils.py @@ -4,6 +4,11 @@ import ttmlir from dataclasses import make_dataclass, is_dataclass, asdict +import importlib +import logging + +TTRT_INSTALLED = importlib.util.find_spec("ttrt") is not None + def parse_mlir_file(model_path): with ttmlir.ir.Context() as ctx, open(model_path, "r") as model_file: @@ -14,6 +19,18 @@ def parse_mlir_file(model_path): return module +def parse_flatbuffer_file(fb_path): + if not TTRT_INSTALLED: + logging.error( + "TTRT is not installed in Python Environment, unable to parse Flatbuffer" + ) + return None + + from ttrt.common.util import Binary + + fbb = Binary(file_path=fb_path) + + def to_dataclass(obj: dict, dc_name: str = "tempClass"): return make_dataclass(dc_name, ((k, type(v)) for k, v in obj.items()))(**obj) @@ -36,4 +53,4 @@ def to_adapter_format(*objs): def make_editable_kv(kv, editable): obj = asdict(kv) obj["editable"] = editable - return make_dataclass("KeyValue", ((k, type(v)) for k, v in obj.items()))(**obj) + return to_dataclass(obj, "KeyValue") From 11ebc9de96c05bedc2933bf335ced8617e387038 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 22 Jan 2025 20:30:15 +0000 Subject: [PATCH 10/40] Suggested Changes + Starting Explorer Integration --- include/ttmlir/Conversion/Passes.h | 22 +++++++--------- include/ttmlir/Target/Common/debug_info.fbs | 11 +------- include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h | 9 ++++--- lib/Target/TTNN/TTNNToFlatbuffer.cpp | 22 +++++++--------- .../tt_adapter/src/tt_adapter/utils.py | 26 ++++++++++++++++--- 5 files changed, 49 insertions(+), 41 deletions(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index 452bd80345..2b7890f883 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -26,7 +26,7 @@ namespace mlir::tt { #define GEN_PASS_REGISTRATION #include "ttmlir/Conversion/Passes.h.inc" -struct MLIRModuleCacher { +struct MLIRModuleLogger { mlir::MLIRContext *context; std::vector> moduleCache; @@ -41,21 +41,19 @@ struct MLIRModuleCacher { auto passAction = mlir::cast(action); // A Pass action has occured, need to store the previous module // before transform is completed. - - std::string outString; - llvm::raw_string_ostream os(outString); - mlir::OpPrintingFlags flags; - flags.enableDebugInfo(); - passAction.getOp()->print(os, flags); - os.flush(); - std::string passName = passAction.getPass().getName().str(); - if (not passNamesToCache.empty() and + if (passNamesToCache.empty() or std::find(passNamesToCache.begin(), passNamesToCache.end(), passName) != passNamesToCache.end()) { - this->moduleCache.emplace_back(passName, outString); - } else if (passNamesToCache.empty()) { + + std::string outString; + llvm::raw_string_ostream os(outString); + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(); + passAction.getOp()->print(os, flags); + os.flush(); + this->moduleCache.emplace_back(passName, outString); } } diff --git a/include/ttmlir/Target/Common/debug_info.fbs b/include/ttmlir/Target/Common/debug_info.fbs index 1e1ee829cc..6f4a8b2a40 100644 --- a/include/ttmlir/Target/Common/debug_info.fbs +++ b/include/ttmlir/Target/Common/debug_info.fbs @@ -24,18 +24,9 @@ table MLIR { source: string; } -table ModuleCacheItem { - pass_name: string; - module: string; -} - -table ModuleCache { - module_cache: [ModuleCacheItem]; -} - table DebugInfo { mlir: MLIR; cpp: string; golden_info: GoldenInfo; - module_cache: ModuleCache; + mlir_stages: [MLIR]; } diff --git a/include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h b/include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h index 360c52aa0b..0712f08208 100644 --- a/include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h +++ b/include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h @@ -13,16 +13,17 @@ namespace mlir::tt::ttnn { // Convert a TTNNIR operation to a flatbuffer std::shared_ptr ttnnToFlatbuffer( - Operation *op, std::unordered_map goldenMap = {}, - std::vector> moduleCache = {}); + Operation *op, + const std::unordered_map &goldenMap = {}, + const std::vector> &moduleCache = {}); // Convert a TTNNIR operation to a flatbuffer // This function signature is required in order to register the conversion in // mlir translation framework LogicalResult translateTTNNToFlatbuffer( Operation *op, llvm::raw_ostream &os, - std::unordered_map goldenMap = {}, - std::vector> moduleCache = {}); + const std::unordered_map &goldenMap = {}, + const std::vector> &moduleCache = {}); } // namespace mlir::tt::ttnn #endif diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index d14d98eb19..043b52f61a 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -1222,10 +1222,10 @@ emitTTNNOperation(FlatbufferObjectCache &cache, Operation *op, llvm_unreachable("unhandled op in emitTTNNOperation"); } -std::shared_ptr -ttnnToFlatbuffer(Operation *op, - std::unordered_map goldenMap, - std::vector> moduleCache) { +std::shared_ptr ttnnToFlatbuffer( + Operation *op, + const std::unordered_map &goldenMap, + const std::vector> &moduleCache) { ModuleOp module = dyn_cast(op); assert(module && "Expected ModuleOp as top level operation"); @@ -1260,21 +1260,19 @@ ttnnToFlatbuffer(Operation *op, } // Load the ModuleCache if present and populate DebugInfo - std::vector<::flatbuffers::Offset<::tt::target::ModuleCacheItem>> - moduleCacheList; + std::vector<::flatbuffers::Offset<::tt::target::MLIR>> moduleCacheList; moduleCacheList.reserve(moduleCache.size()); for (const auto &item : moduleCache) { - auto moduleCacheItem = ::tt::target::CreateModuleCacheItemDirect( + // Here the Name is the Pass Name and Source is the IR itself + auto moduleCacheItem = ::tt::target::CreateMLIRDirect( fbb, item.first.c_str(), item.second.c_str()); moduleCacheList.push_back(moduleCacheItem); } - auto moduleCacheFbb = - ::tt::target::CreateModuleCacheDirect(fbb, &moduleCacheList); auto goldenInfo = ::tt::target::CreateGoldenInfoDirect(fbb, &goldenKVList); auto debugInfo = ::tt::target::CreateDebugInfoDirect( - fbb, mlir, cpp.c_str(), goldenInfo, moduleCacheFbb); + fbb, mlir, cpp.c_str(), goldenInfo, &moduleCacheList); std::vector<::flatbuffers::Offset<::tt::target::ttnn::Program>> programs; module->walk([&](func::FuncOp func) { @@ -1304,8 +1302,8 @@ ttnnToFlatbuffer(Operation *op, LogicalResult translateTTNNToFlatbuffer( Operation *op, llvm::raw_ostream &os, - std::unordered_map goldenMap, - std::vector> moduleCache) { + const std::unordered_map &goldenMap, + const std::vector> &moduleCache) { std::shared_ptr data = ttnnToFlatbuffer(op, goldenMap, moduleCache); std::size_t size = ::flatbuffers::GetSizePrefixedBufferLength( static_cast(data.get())); diff --git a/tools/explorer/tt_adapter/src/tt_adapter/utils.py b/tools/explorer/tt_adapter/src/tt_adapter/utils.py index ae473f7d00..28886be3b2 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/utils.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/utils.py @@ -19,16 +19,36 @@ def parse_mlir_file(model_path): return module -def parse_flatbuffer_file(fb_path): +def parse_flatbuffer_file(fb_path, at_pass=None): if not TTRT_INSTALLED: logging.error( "TTRT is not installed in Python Environment, unable to parse Flatbuffer" ) return None - from ttrt.common.util import Binary + from ttrt.common.util import Binary, Logger, FileManager - fbb = Binary(file_path=fb_path) + logger = Logger() + file_manager = FileManager(logger) + + fbb = Binary(logger, file_manager, fb_path) + # This will load a Binary that we will parse to see if the correct attributes are present + # Get the Flatbuffer as a Dict + fbb_dict = fbb.fbb_dict + if "mlir_stages" not in fbb_dict: + # MLIR Stages not present + logging.error( + "Flatbuffer does not contain Cached Module, invalid for explorer." + ) + return None + + cached_modules = fbb_dict["mlir_stages"] + for (pass_name, module) in cached_modules: + if pass_name == at_pass: + return module + + logging.error("at_pass=%s not found in Flatbuffer.") + return None def to_dataclass(obj: dict, dc_name: str = "tempClass"): From 57e9477e5dc2ccb2652e95466f813994df4f1147 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 23 Jan 2025 14:30:08 +0000 Subject: [PATCH 11/40] Added a thing --- lib/Target/TTNN/TTNNToFlatbuffer.cpp | 2 +- lib/Target/TTNN/TTNNToFlatbufferRegistration.cpp | 2 +- tools/explorer/tt_adapter/src/tt_adapter/main.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index 043b52f61a..a21792c43f 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -1249,7 +1249,7 @@ std::shared_ptr ttnnToFlatbuffer( std::vector<::flatbuffers::Offset<::tt::target::GoldenKV>> goldenKVList; goldenKVList.reserve(goldenMap.size()); - for (auto element : goldenMap) { + for (const auto &element : goldenMap) { std::vector dataTensor = element.second.convertDataToVector(); auto goldenTensor = ::tt::target::CreateGoldenTensorDirect( fbb, element.second.name.c_str(), &element.second.shape, diff --git a/lib/Target/TTNN/TTNNToFlatbufferRegistration.cpp b/lib/Target/TTNN/TTNNToFlatbufferRegistration.cpp index 243c2b7e37..791ab52260 100644 --- a/lib/Target/TTNN/TTNNToFlatbufferRegistration.cpp +++ b/lib/Target/TTNN/TTNNToFlatbufferRegistration.cpp @@ -19,7 +19,7 @@ void registerTTNNToFlatbuffer() { TranslateFromMLIRRegistration reg( "ttnn-to-flatbuffer", "translate ttnn to flatbuffer", [](Operation *op, llvm::raw_ostream &os) -> LogicalResult { - return translateTTNNToFlatbuffer(op, os, {}); + return translateTTNNToFlatbuffer(op, os, {}, {}); }, [](DialectRegistry ®istry) { // clang-format off diff --git a/tools/explorer/tt_adapter/src/tt_adapter/main.py b/tools/explorer/tt_adapter/src/tt_adapter/main.py index d494721ae0..f8e25b84d9 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/main.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/main.py @@ -74,7 +74,7 @@ class TTAdapter(model_explorer.Adapter): name="Tenstorrent MLIR Adapter", description="Adapter for Tenstorrent MLIR dialects used in the Forge compiler.", source_repo="https://github.com/tenstorrent/tt-mlir/tree/main/tools/explorer/tt_adapter", - fileExts=["mlir", "ttir"], + fileExts=["mlir", "ttir", "ttnn"], settings={ "optimizationPolicies": list(OPTIMIZATION_POLICIES.keys()), }, From d6ed7baa6463cf98b94960c9f9b9f8cefe7e146d Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 24 Jan 2025 20:06:14 +0000 Subject: [PATCH 12/40] small fix --- lib/Target/TTNN/TTNNToFlatbuffer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index a21792c43f..043b52f61a 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -1249,7 +1249,7 @@ std::shared_ptr ttnnToFlatbuffer( std::vector<::flatbuffers::Offset<::tt::target::GoldenKV>> goldenKVList; goldenKVList.reserve(goldenMap.size()); - for (const auto &element : goldenMap) { + for (auto element : goldenMap) { std::vector dataTensor = element.second.convertDataToVector(); auto goldenTensor = ::tt::target::CreateGoldenTensorDirect( fbb, element.second.name.c_str(), &element.second.shape, From 0d650ea584ab88202b4d285f958469c65184d225 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 24 Jan 2025 20:23:15 +0000 Subject: [PATCH 13/40] TestString in Schema --- include/ttmlir/Target/Common/debug_info.fbs | 1 + lib/Target/TTNN/TTNNToFlatbuffer.cpp | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/include/ttmlir/Target/Common/debug_info.fbs b/include/ttmlir/Target/Common/debug_info.fbs index 6f4a8b2a40..0c1940d905 100644 --- a/include/ttmlir/Target/Common/debug_info.fbs +++ b/include/ttmlir/Target/Common/debug_info.fbs @@ -29,4 +29,5 @@ table DebugInfo { cpp: string; golden_info: GoldenInfo; mlir_stages: [MLIR]; + test: string; } diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index 043b52f61a..b7b3de413e 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -1271,8 +1271,9 @@ std::shared_ptr ttnnToFlatbuffer( } auto goldenInfo = ::tt::target::CreateGoldenInfoDirect(fbb, &goldenKVList); + std::string testString = "Hello World!"; auto debugInfo = ::tt::target::CreateDebugInfoDirect( - fbb, mlir, cpp.c_str(), goldenInfo, &moduleCacheList); + fbb, mlir, cpp.c_str(), goldenInfo, &moduleCacheList, testString.c_str()); std::vector<::flatbuffers::Offset<::tt::target::ttnn::Program>> programs; module->walk([&](func::FuncOp func) { From 49b90af4be92e3f29198eba52adb3509d635d929 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 24 Jan 2025 20:37:18 +0000 Subject: [PATCH 14/40] TestString in Schema + CPP --- include/ttmlir/Target/Common/debug_info.fbs | 2 +- lib/Target/TTNN/TTNNToFlatbuffer.cpp | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/include/ttmlir/Target/Common/debug_info.fbs b/include/ttmlir/Target/Common/debug_info.fbs index 0c1940d905..33b363bd9b 100644 --- a/include/ttmlir/Target/Common/debug_info.fbs +++ b/include/ttmlir/Target/Common/debug_info.fbs @@ -24,10 +24,10 @@ table MLIR { source: string; } + table DebugInfo { mlir: MLIR; cpp: string; golden_info: GoldenInfo; - mlir_stages: [MLIR]; test: string; } diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index b7b3de413e..48a707e4f8 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -1271,9 +1271,8 @@ std::shared_ptr ttnnToFlatbuffer( } auto goldenInfo = ::tt::target::CreateGoldenInfoDirect(fbb, &goldenKVList); - std::string testString = "Hello World!"; - auto debugInfo = ::tt::target::CreateDebugInfoDirect( - fbb, mlir, cpp.c_str(), goldenInfo, &moduleCacheList, testString.c_str()); + auto debugInfo = ::tt::target::CreateDebugInfoDirect(fbb, mlir, cpp.c_str(), + goldenInfo, cpp.c_str()); std::vector<::flatbuffers::Offset<::tt::target::ttnn::Program>> programs; module->walk([&](func::FuncOp func) { From fd7a5a12c0e12c62ebbb056bf8c0954802efbeac Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 27 Jan 2025 16:48:53 +0000 Subject: [PATCH 15/40] Reverted to old schema --- include/ttmlir/Target/Common/debug_info.fbs | 2 +- lib/Target/TTNN/TTNNToFlatbuffer.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/ttmlir/Target/Common/debug_info.fbs b/include/ttmlir/Target/Common/debug_info.fbs index 33b363bd9b..af087d8ae6 100644 --- a/include/ttmlir/Target/Common/debug_info.fbs +++ b/include/ttmlir/Target/Common/debug_info.fbs @@ -29,5 +29,5 @@ table DebugInfo { mlir: MLIR; cpp: string; golden_info: GoldenInfo; - test: string; + module_cache: [MLIR]; } diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index 48a707e4f8..043b52f61a 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -1271,8 +1271,8 @@ std::shared_ptr ttnnToFlatbuffer( } auto goldenInfo = ::tt::target::CreateGoldenInfoDirect(fbb, &goldenKVList); - auto debugInfo = ::tt::target::CreateDebugInfoDirect(fbb, mlir, cpp.c_str(), - goldenInfo, cpp.c_str()); + auto debugInfo = ::tt::target::CreateDebugInfoDirect( + fbb, mlir, cpp.c_str(), goldenInfo, &moduleCacheList); std::vector<::flatbuffers::Offset<::tt::target::ttnn::Program>> programs; module->walk([&](func::FuncOp func) { From a3c0fa7a6f3c3650514b6d4ac17289f1e9f10957 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 27 Jan 2025 16:51:11 +0000 Subject: [PATCH 16/40] Rename --- include/ttmlir/Target/Common/debug_info.fbs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/ttmlir/Target/Common/debug_info.fbs b/include/ttmlir/Target/Common/debug_info.fbs index af087d8ae6..971742ac96 100644 --- a/include/ttmlir/Target/Common/debug_info.fbs +++ b/include/ttmlir/Target/Common/debug_info.fbs @@ -29,5 +29,5 @@ table DebugInfo { mlir: MLIR; cpp: string; golden_info: GoldenInfo; - module_cache: [MLIR]; + mlir_stages: [MLIR]; } From 2dbefd02d821f226a9580cf800325e66986b4446 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 27 Jan 2025 17:37:08 +0000 Subject: [PATCH 17/40] Reorder and Pray --- include/ttmlir/Target/Common/debug_info.fbs | 2 +- lib/Target/TTNN/TTNNToFlatbuffer.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/ttmlir/Target/Common/debug_info.fbs b/include/ttmlir/Target/Common/debug_info.fbs index 971742ac96..e250c13b60 100644 --- a/include/ttmlir/Target/Common/debug_info.fbs +++ b/include/ttmlir/Target/Common/debug_info.fbs @@ -28,6 +28,6 @@ table MLIR { table DebugInfo { mlir: MLIR; cpp: string; - golden_info: GoldenInfo; mlir_stages: [MLIR]; + golden_info: GoldenInfo; } diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index 043b52f61a..123ec0b976 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -1272,7 +1272,7 @@ std::shared_ptr ttnnToFlatbuffer( auto goldenInfo = ::tt::target::CreateGoldenInfoDirect(fbb, &goldenKVList); auto debugInfo = ::tt::target::CreateDebugInfoDirect( - fbb, mlir, cpp.c_str(), goldenInfo, &moduleCacheList); + fbb, mlir, cpp.c_str(), &moduleCacheList, goldenInfo); std::vector<::flatbuffers::Offset<::tt::target::ttnn::Program>> programs; module->walk([&](func::FuncOp func) { From 5a28cc50b92a6b29237e2032d46f39f003c242f8 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Tue, 28 Jan 2025 19:08:26 +0000 Subject: [PATCH 18/40] Added MLIR Stages support in TTIR Builder --- include/ttmlir/Conversion/Passes.h | 135 ++++++++++++++---- python/Passes.cpp | 82 ++++++++--- python/test_infra/test_utils.py | 18 ++- .../tt_adapter/src/tt_adapter/main.py | 15 +- .../tt_adapter/src/tt_adapter/runner.py | 63 ++++++-- .../tt_adapter/src/tt_adapter/utils.py | 68 ++++++++- 6 files changed, 311 insertions(+), 70 deletions(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index 2b7890f883..af29338e78 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -31,34 +31,119 @@ struct MLIRModuleLogger { std::vector> moduleCache; void attachContext(mlir::MLIRContext *ctx, - std::vector passNamesToCache = {}) { + std::vector passNamesToCache = {}, + std::vector> + *passedModuleCache = nullptr) { context = ctx; - context->registerActionHandler( - [this, passNamesToCache](llvm::function_ref transform, - const mlir::tracing::Action &action) { - if (mlir::isa(action)) { - auto passAction = mlir::cast(action); - // A Pass action has occured, need to store the previous module - // before transform is completed. - std::string passName = passAction.getPass().getName().str(); - - if (passNamesToCache.empty() or - std::find(passNamesToCache.begin(), passNamesToCache.end(), - passName) != passNamesToCache.end()) { - - std::string outString; - llvm::raw_string_ostream os(outString); - mlir::OpPrintingFlags flags; - flags.enableDebugInfo(); - passAction.getOp()->print(os, flags); - os.flush(); - - this->moduleCache.emplace_back(passName, outString); - } + context->registerActionHandler([this, passNamesToCache, passedModuleCache]( + llvm::function_ref transform, + const mlir::tracing::Action &action) { + // Also might make sense to store the _FIRST_ module. Or the module before + // it was sent through the pipeline. + + if (passedModuleCache != nullptr and passedModuleCache->empty()) { + // In Python Env so we have to add it ot the passedCache + std::string passName = "PRE-PIPELINE", outString; + llvm::raw_string_ostream os(outString); + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(); + action.getContextIRUnits()[0].print(os, flags); + os.flush(); + passedModuleCache->emplace_back(passName, outString); + } else if (passedModuleCache == nullptr and moduleCache.empty()) { + // Add it to the current Cache. + std::string passName = "PRE-PIPELINE", outString; + llvm::raw_string_ostream os(outString); + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(); + action.getContextIRUnits()[0].print(os, flags); + os.flush(); + moduleCache.emplace_back(passName, outString); + } + + // Might make more sense to hold the module after a transformation has + // occured. + transform(); // Run the transformation pass. + + // Now save the module if it should be Cached. + if (mlir::isa(action)) { + auto passAction = mlir::cast(action); + // A Pass action has occured, need to store the previous module + // before transform is completed. + std::string passName = passAction.getPass().getName().str(); + + if (passNamesToCache.empty() or + std::find(passNamesToCache.begin(), passNamesToCache.end(), + passName) != passNamesToCache.end()) { + std::string outString; + llvm::raw_string_ostream os(outString); + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(); + passAction.getOp()->print(os, flags); + os.flush(); + + this->moduleCache.emplace_back(passName, outString); + } + } else if (action.getTag() == + "pass-execution") { // Tag will always be pass-execution but + // unable to cast + // This block was made considering that PassActions are weirdly not + // registered when run through python We can String parse the printed + // PassAction to determine the passName, The Op will be part of the + // IRUnits, and we can extract it + + // The printed OP looks like: + // `pass-execution` running `TTNNDeallocate` on Operation + // `builtin.module` So we can filter for the `s and get the PassName in + // between these. There will always only be 1R Unit and it is the + // ModuleOp. + + std::string passOutput, passName = ""; + llvm::raw_string_ostream passOut(passOutput); + action.print(passOut); + passOut.flush(); + + int tildeCount = 0; + const int TILDE_BEFORE_PASS_NAME = 3, TILDE_AFTER_PASS_NAME = 4; + for (const auto &c : passOutput) { + if (c == '`') { + tildeCount++; + } + + if (tildeCount == + TILDE_BEFORE_PASS_NAME) { // This is the specific tildeCount that + // prefixes the passName + passName += c; + } else if (tildeCount >= + TILDE_AFTER_PASS_NAME) { // Specific count after passName + // is complete. + break; + } + } + + // Now save the ModuleOp from the IRUnits, for PassExecution there will + // always be only 1 IR unit. + if (passNamesToCache.empty() or + std::find(passNamesToCache.begin(), passNamesToCache.end(), + passName) != passNamesToCache.end()) { + std::string outString; + llvm::raw_string_ostream os(outString); + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(); + action.getContextIRUnits()[0].print(os, flags); + os.flush(); + + // Python passes do not maintain the sufficient context to actually + // update moduleCache, one has to be passed You can pass this in + // Python using the ModuleLog class in the `passes` module. See + // python/Passes.cpp for usage. + if (passedModuleCache != nullptr) { + passedModuleCache->emplace_back(passName, outString); } - transform(); // Run the transformation pass. - }); + } + } + }); } }; diff --git a/python/Passes.cpp b/python/Passes.cpp index 43709426d4..6119217e6c 100644 --- a/python/Passes.cpp +++ b/python/Passes.cpp @@ -2,6 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 +#include "ttmlir/Conversion/Passes.h" #include "mlir/InitAllTranslations.h" #include "ttmlir/Bindings/Python/TTMLIRModule.h" #include "ttmlir/RegisterAll.h" @@ -10,6 +11,7 @@ #include PYBIND11_MAKE_OPAQUE(std::shared_ptr); +PYBIND11_MAKE_OPAQUE(std::vector>); namespace mlir::tt::ttnn { void registerTTNNToFlatbuffer(); @@ -164,25 +166,31 @@ void populatePassesModule(py::module &m) { // NOLINTEND }); - m.def("ttnn_to_flatbuffer_file", - [](MlirModule module, std::string &filepath, - std::unordered_map goldenMap) { - mlir::Operation *moduleOp = unwrap(mlirModuleGetOperation(module)); + m.def( + "ttnn_to_flatbuffer_file", + [](MlirModule module, std::string &filepath, + const std::unordered_map + &goldenMap = {}, + const std::vector> &moduleCache = + {}) { + mlir::Operation *moduleOp = unwrap(mlirModuleGetOperation(module)); - std::error_code fileError; - llvm::raw_fd_ostream file(filepath, fileError); + std::error_code fileError; + llvm::raw_fd_ostream file(filepath, fileError); - if (fileError) { - throw std::runtime_error("Failed to open file: " + filepath + - ". Error: " + fileError.message()); - } + if (fileError) { + throw std::runtime_error("Failed to open file: " + filepath + + ". Error: " + fileError.message()); + } - if (mlir::failed(mlir::tt::ttnn::translateTTNNToFlatbuffer( - moduleOp, file, goldenMap))) { - throw std::runtime_error("Failed to write flatbuffer to file: " + - filepath); - } - }); + if (mlir::failed(mlir::tt::ttnn::translateTTNNToFlatbuffer( + moduleOp, file, goldenMap, moduleCache))) { + throw std::runtime_error("Failed to write flatbuffer to file: " + + filepath); + } + }, + py::arg("module"), py::arg("filepath"), py::arg("goldenMap") = py::dict(), + py::arg("moduleCache") = py::list()); m.def("ttmetal_to_flatbuffer_file", [](MlirModule module, std::string &filepath, @@ -205,6 +213,21 @@ void populatePassesModule(py::module &m) { .value("Float32", ::tt::target::DataType::Float32) .value("Float16", ::tt::target::DataType::Float16); + m.def("lookup_dtype", [](std::string enumName) { + // Function to return the enum value based on the name. + const uint16_t minI = static_cast(::tt::target::DataType::MIN), + maxI = static_cast(::tt::target::DataType::MAX); + for (int i = minI; i <= maxI; i++) { + auto dtype = static_cast<::tt::target::DataType>(i); + std::string currEnumName = EnumNameDataType(dtype); + if (currEnumName == enumName) { + return dtype; + } + } + // Not found so return the MIN value (Float32) by Default + return ::tt::target::DataType::MIN; + }); + py::class_(m, "GoldenTensor") .def(py::init, std::vector, ::tt::target::DataType, std::uint8_t *>()) @@ -221,6 +244,31 @@ void populatePassesModule(py::module &m) { return mlir::tt::GoldenTensor(name, shape, strides, dtype, reinterpret_cast(ptr)); }); -} + py::class_>>(m, "ModuleLog") + .def(py::init<>()) + .def("to_list", + [](const std::vector> &vec) { + py::list list; + for (const auto &pair : vec) { + list.append(py::make_tuple(pair.first, pair.second)); + } + return list; + }); + + py::class_(m, "MLIRModuleLogger") + .def(py::init<>()) + .def( + "attach_context", + [](mlir::tt::MLIRModuleLogger &self, MlirContext ctx, + std::vector> &moduleCache, + std::vector &passnames_to_cache) { + self.attachContext(unwrap(ctx), passnames_to_cache, &moduleCache); + }, + py::arg("ctx"), py::arg("moduleCache").noconvert(), + py::arg("passnames_to_cache") = py::list()) + .def_property_readonly( + "module_log", + [](mlir::tt::MLIRModuleLogger &self) { return self.moduleCache; }); +} } // namespace mlir::ttmlir::python diff --git a/python/test_infra/test_utils.py b/python/test_infra/test_utils.py index da1957b7f6..7356b51f6c 100644 --- a/python/test_infra/test_utils.py +++ b/python/test_infra/test_utils.py @@ -13,6 +13,8 @@ ttnn_to_flatbuffer_file, ttir_to_ttmetal_backend_pipeline, ttmetal_to_flatbuffer_file, + MLIRModuleLogger, + ModuleLog, ) from .ttir_builder import Golden, Operand, Shape, TTIRBuilder @@ -220,9 +222,7 @@ def ttir_to_ttmetal( def ttnn_to_flatbuffer( - module, - builder, - output_file_name: str = "ttnn_fb.ttnn", + module, builder, output_file_name: str = "ttnn_fb.ttnn", module_log=None ): """ Converts TTNN module to flatbuffer and saves to file. Wrapper around @@ -230,7 +230,12 @@ def ttnn_to_flatbuffer( """ # Convert to flatbuffer file. - ttnn_to_flatbuffer_file(module, output_file_name, builder.get_golden_map()) + if module_log: + ttnn_to_flatbuffer_file( + module, output_file_name, builder.get_golden_map(), module_log + ) + else: + ttnn_to_flatbuffer_file(module, output_file_name, builder.get_golden_map()) print("`ttnn_to_flatbuffer_file` passed successfully.") @@ -326,8 +331,11 @@ def wrapper(): if "ttnn" in targets: module, builder = compile_as_mlir_module(test_fn, inputs_shapes) + module_logger = MLIRModuleLogger() + module_log = ModuleLog() + module_logger.attach_context(module.context, module_log) module = ttir_to_ttnn(module, builder, test_base + ".mlir") - ttnn_to_flatbuffer(module, builder, test_base + ".ttnn") + ttnn_to_flatbuffer(module, builder, test_base + ".ttnn", module_log) return wrapper diff --git a/tools/explorer/tt_adapter/src/tt_adapter/main.py b/tools/explorer/tt_adapter/src/tt_adapter/main.py index f8e25b84d9..58de92b3c5 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/main.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/main.py @@ -96,7 +96,8 @@ def convert( # Get performance results. perf_trace = self.model_runner.get_perf_trace(model_path) - module = utils.parse_mlir_file(optimized_model_path) + with open(optimized_model_path, "r") as model_file: + module = utils.parse_mlir_str(model_file.read()) # Convert TTIR to Model Explorer Graphs and Display/Return graph, perf_data = mlir.build_graph(module, perf_trace) @@ -107,7 +108,17 @@ def convert( if overrides := self.model_runner.get_overrides(model_path): graph = utils.add_to_dataclass(graph, "overrides", overrides) else: - module = utils.parse_mlir_file(model_path) + if model_path.endswith(".ttnn"): + # Executing on a Flatbuffer so we should parse through that path + module_str = utils.parse_flatbuffer_file( + model_path, at_pass="ConvertTTIRToTTNN" + ) + assert module_str is not None, "Failed to parse flatbuffer" + if module_str: + module = utils.parse_mlir_str(module_str) + else: + with open(model_path, "r") as model_file: + module = utils.parse_mlir_str(model_file.read()) # Convert TTIR to Model Explorer Graphs and Display/Return graph, _ = mlir.build_graph(module) diff --git a/tools/explorer/tt_adapter/src/tt_adapter/runner.py b/tools/explorer/tt_adapter/src/tt_adapter/runner.py index 577d20f807..6981f2d1a1 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/runner.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/runner.py @@ -8,7 +8,7 @@ # TODO(odjuricic) Cleaner to implement ttrt --quiet flag. # os.environ["TTRT_LOGGER_LEVEL"] = "ERROR" from ttrt import API as ttrt -import ttmlir.passes +from ttmlir import passes from . import utils, mlir import pandas as pd import threading @@ -172,6 +172,18 @@ def compile_and_run_wrapper(self, model_path, overrides_string): self.progress = 100 def compile_and_run(self, model_path, overrides_string): + FLATBUFFER = False + if model_path.endswith(".ttnn"): + # This is being run from a Flatbuffer. Need To Render TTIR from Flatbuffer + FLATBUFFER = True + # Write the TTIR from this file into a temporary file to run through the compiler + ttir_module_str = utils.parse_flatbuffer_file( + model_path, at_pass="Canonicalizer" + ) + ttir_module_path = f"{model_path}_ttir.mlir" + with open(ttir_module_path, "w+") as temp_module: + temp_module.write(ttir_module_str) + model_name = os.path.basename(model_path) flatbuffer_file = model_name + ".ttnn" state = self.model_state[model_path] @@ -193,10 +205,14 @@ def compile_and_run(self, model_path, overrides_string): ttnn_ir_file = ( f"{state.model_output_dir}/{model_name.replace('.mlir', '_ttnn.mlir')}" ) + + if FLATBUFFER: + ttnn_ir_file = f"{state.model_output_dir}/{model_name}.mlir" + compile_command = [ f"{self._build_dir}/bin/ttmlir-opt", f"--ttir-to-ttnn-backend-pipeline={overrides_string}", - model_path, + model_path if not FLATBUFFER else ttir_module_path, "-o", ttnn_ir_file, "--mlir-print-debuginfo", @@ -214,20 +230,38 @@ def compile_and_run(self, model_path, overrides_string): ############################## Translate ################################# - to_flatbuffer_command = [ - f"{self._build_dir}/bin/ttmlir-translate", - "--ttnn-to-flatbuffer", - ttnn_ir_file, - "-o", - flatbuffer_file, - ] + # Need this flatbuffer file to inherit the golden data + golden_map = utils.golden_map_from_flatbuffer(model_path) + # Get module from file + with open(ttnn_ir_file, "r") as f: + ttnn_module = utils.parse_mlir_str(f.read()) + + # Don't run the subprocess command anymore + # to_flatbuffer_command = [ + # f"{self._build_dir}/bin/ttmlir-translate", + # "--ttnn-to-flatbuffer", + # ttnn_ir_file, + # "-o", + # flatbuffer_file, + # ] self.log("Running TTNN to Flatbuffer File") - translate_process = self.run_in_subprocess(to_flatbuffer_command) - if translate_process.returncode != 0: - error = "Error while running TTNN to Flatbuffer File" - self.log(error) - raise ExplorerRunException(error) + + # Run through pybound translation so we can pass golden_map + try: + if golden_map: + passes.ttnn_to_flatbuffer_file(ttnn_module, flatbuffer_file, golden_map) + else: + passes.ttnn_to_flatbuffer_file(ttnn_module, flatbuffer_file) + except: + self.log("Error while running TTNN to Flatbuffer File") + raise ExplorerRunException() + + # translate_process = self.run_in_subprocess(to_flatbuffer_command) + # if translate_process.returncode != 0: + # error = "Error while running TTNN to Flatbuffer File" + # self.log(error) + # raise ExplorerRunException(error) self.progress = 30 ############################## TTRT Perf ################################# @@ -260,6 +294,7 @@ def compile_and_run(self, model_path, overrides_string): print("Total device duration: ", perf["DEVICE FW DURATION [ns]"].sum(), "ns") + # TTNN_IR_FILE from flatbuffer is still relevant since model_path is the FB with golden data and it will rented optimized_model_path instead state.optimized_model_path = ttnn_ir_file self.progress = 100 diff --git a/tools/explorer/tt_adapter/src/tt_adapter/utils.py b/tools/explorer/tt_adapter/src/tt_adapter/utils.py index 28886be3b2..69a257e978 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/utils.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/utils.py @@ -6,20 +6,21 @@ import importlib import logging +import torch TTRT_INSTALLED = importlib.util.find_spec("ttrt") is not None -def parse_mlir_file(model_path): - with ttmlir.ir.Context() as ctx, open(model_path, "r") as model_file: +def parse_mlir_str(module_str): + with ttmlir.ir.Context() as ctx: ttmlir.dialects.ttir.register_dialect(ctx) ttmlir.dialects.tt.register_dialect(ctx) ttmlir.dialects.ttnn.register_dialect(ctx) - module = ttmlir.ir.Module.parse(model_file.read(), ctx) + module = ttmlir.ir.Module.parse(module_str, ctx) return module -def parse_flatbuffer_file(fb_path, at_pass=None): +def parse_flatbuffer_file(fb_path, at_pass=None, program=0): if not TTRT_INSTALLED: logging.error( "TTRT is not installed in Python Environment, unable to parse Flatbuffer" @@ -35,22 +36,75 @@ def parse_flatbuffer_file(fb_path, at_pass=None): # This will load a Binary that we will parse to see if the correct attributes are present # Get the Flatbuffer as a Dict fbb_dict = fbb.fbb_dict - if "mlir_stages" not in fbb_dict: + # Get the MLIR_stages for the first program. If multiple other programs are needed in the future, parse as such. + try: + debug_info = fbb_dict["programs"][program]["debug_info"] + except: + logging.error("Flatbuffer does not contain DebugInfo on Program %d", program) + return None + + if "mlir_stages" not in debug_info: # MLIR Stages not present logging.error( "Flatbuffer does not contain Cached Module, invalid for explorer." ) return None - cached_modules = fbb_dict["mlir_stages"] + cached_modules = debug_info["mlir_stages"] for (pass_name, module) in cached_modules: if pass_name == at_pass: return module - logging.error("at_pass=%s not found in Flatbuffer.") + logging.error("at_pass=%s not found in Flatbuffer.", at_pass) return None +def golden_map_from_flatbuffer(fb_path, program=0): + # Get the golden_map from flatbuffer corresponding to the Program # provided + if not TTRT_INSTALLED: + logging.error( + "TTRT is not installed in Python Environment, unable to parse Flatbuffer." + ) + return None + from ttrt.common.util import Binary, Logger, FileManager + + logger = Logger() + file_manager = FileManager(logger) + + fbb = Binary(logger, file_manager, fb_path) + + fbb_dict = fbb.fbb_dict + + try: + debug_info = fbb_dict["programs"][program]["debug_info"] + except: + logging.error("Flatbuffer does not contain DebugInfo on Program %d", program) + return None + + if "golden_info" not in debug_info: + logging.error("Flatbuffer does not contain Golden Data.") + return None + + golden_map = debug_info["golden_info"]["golden_map"] + rendered_golden_map = {} + + # Create a np array from the data: + data_tensor = torch.Tensor(data["data"]) + + for entry in golden_map: + data = entry["value"] + tensor = ttmlir.passes.create_golden_tensor( + data["name"], + data["shape"], + data["strides"], + ttmlir.passes.lookup_dtype(data["dtype"]), + data_tensor.data_ptr, + ) + rendered_golden_map[entry["key"]] = tensor + + return rendered_golden_map + + def to_dataclass(obj: dict, dc_name: str = "tempClass"): return make_dataclass(dc_name, ((k, type(v)) for k, v in obj.items()))(**obj) From 2566bef8b053fb2cb60993ff6aeb843bcf1d8e17 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 29 Jan 2025 21:36:00 +0000 Subject: [PATCH 19/40] Fixed execution so Golden Results correctly propagate after run --- include/ttmlir/Conversion/Passes.h | 20 +++++---- python/Passes.cpp | 33 +++++++++++++- runtime/tools/python/ttrt/common/callback.py | 3 +- runtime/tools/python/ttrt/common/perf.py | 2 +- runtime/tools/python/ttrt/common/run.py | 6 +++ runtime/tools/python/ttrt/common/util.py | 8 +++- tools/explorer/CMakeLists.txt | 2 +- .../tt_adapter/src/tt_adapter/main.py | 2 +- .../tt_adapter/src/tt_adapter/runner.py | 45 ++++++++++++++++++- .../tt_adapter/src/tt_adapter/utils.py | 32 +++++-------- 10 files changed, 113 insertions(+), 40 deletions(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index af29338e78..506118e2c3 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -104,20 +104,22 @@ struct MLIRModuleLogger { action.print(passOut); passOut.flush(); - int tildeCount = 0; - const int TILDE_BEFORE_PASS_NAME = 3, TILDE_AFTER_PASS_NAME = 4; + int backTickCount = 0; + const int BACKTICK_BEFORE_PASS_NAME = 3, BACKTICK_AFTER_PASS_NAME = 4; for (const auto &c : passOutput) { if (c == '`') { - tildeCount++; + backTickCount++; } - if (tildeCount == - TILDE_BEFORE_PASS_NAME) { // This is the specific tildeCount that - // prefixes the passName + if (backTickCount == + BACKTICK_BEFORE_PASS_NAME) { // This is the specific backTickCount + // that + // prefixes the passName passName += c; - } else if (tildeCount >= - TILDE_AFTER_PASS_NAME) { // Specific count after passName - // is complete. + } else if (backTickCount >= + BACKTICK_AFTER_PASS_NAME) { // Specific count after + // passName + // is complete. break; } } diff --git a/python/Passes.cpp b/python/Passes.cpp index 6119217e6c..4ad9ac731a 100644 --- a/python/Passes.cpp +++ b/python/Passes.cpp @@ -9,9 +9,12 @@ #include "ttmlir/Target/TTMetal/TTMetalToFlatbuffer.h" #include "ttmlir/Target/TTNN/TTNNToFlatbuffer.h" #include +#include PYBIND11_MAKE_OPAQUE(std::shared_ptr); PYBIND11_MAKE_OPAQUE(std::vector>); +PYBIND11_MAKE_OPAQUE(mlir::tt::GoldenTensor); +PYBIND11_MAKE_OPAQUE(std::unordered_map); namespace mlir::tt::ttnn { void registerTTNNToFlatbuffer(); @@ -189,7 +192,8 @@ void populatePassesModule(py::module &m) { filepath); } }, - py::arg("module"), py::arg("filepath"), py::arg("goldenMap") = py::dict(), + py::arg("module"), py::arg("filepath"), + py::arg("goldenMap").noconvert() = py::dict(), py::arg("moduleCache") = py::list()); m.def("ttmetal_to_flatbuffer_file", @@ -245,6 +249,33 @@ void populatePassesModule(py::module &m) { reinterpret_cast(ptr)); }); + // Opaquely bind the GoldenMapTYpe to prevent conversions and issues in + // copying + py::bind_map>( + m, "GoldenMapType"); + + m.def( + "create_golden_map", + [](std::vector keys, std::vector names, + std::vector> shapes, + std::vector> strides, + std::vector<::tt::target::DataType> dtypes, + std::vector ptrs, int numTensors) { + // Create a Golden Map entirely in C++ to prevent data corruption when + // storing to module + std::unordered_map goldenMap; + + for (int i = 0; i < numTensors; i++) { + auto tensor = + mlir::tt::GoldenTensor(names[i], shapes[i], strides[i], dtypes[i], + reinterpret_cast(ptrs[i])); + goldenMap.emplace(keys[i], tensor); + } + + return goldenMap; + }, + py::return_value_policy::move); + py::class_>>(m, "ModuleLog") .def(py::init<>()) .def("to_list", diff --git a/runtime/tools/python/ttrt/common/callback.py b/runtime/tools/python/ttrt/common/callback.py index 56223224fa..df32acf5e5 100644 --- a/runtime/tools/python/ttrt/common/callback.py +++ b/runtime/tools/python/ttrt/common/callback.py @@ -161,7 +161,8 @@ def golden(callback_runtime_config, binary, program_context, op_context): loc = ttrt.runtime.get_op_loc_info(op_context) - op_golden_tensor = binary.get_debug_info_golden(loc) + op_golden_tensor = binary.get_debug_info_golden(loc.replace("\\", "")) + logging.info("Trying to find loc: %s. USING: %s", loc, loc.replace("\\", "")) if op_golden_tensor is None: logging.debug("Golden tensor is None - skipping golden comparison") diff --git a/runtime/tools/python/ttrt/common/perf.py b/runtime/tools/python/ttrt/common/perf.py index 032036fa44..e9ac04fd1e 100644 --- a/runtime/tools/python/ttrt/common/perf.py +++ b/runtime/tools/python/ttrt/common/perf.py @@ -521,7 +521,7 @@ def signal_handler(sig, frame): for result in test_result: if result["result"] != "pass": - raise Exception(f'result["exception"]') + raise Exception(f'{result["exception"]}') except Exception as e: test_result = { diff --git a/runtime/tools/python/ttrt/common/run.py b/runtime/tools/python/ttrt/common/run.py index b83c5d390f..4b36381c78 100644 --- a/runtime/tools/python/ttrt/common/run.py +++ b/runtime/tools/python/ttrt/common/run.py @@ -456,6 +456,12 @@ def _execute(binaries): golden_tensor_torch = torch.frombuffer( golden_tensor, dtype=dtype ) + + print( + f"Data from golden_tensor {i}", + golden_tensor_torch, + ) + golden_inputs.append(golden_tensor_torch) program.populate_inputs( diff --git a/runtime/tools/python/ttrt/common/util.py b/runtime/tools/python/ttrt/common/util.py index 3f1b1adf8c..666c436d0e 100644 --- a/runtime/tools/python/ttrt/common/util.py +++ b/runtime/tools/python/ttrt/common/util.py @@ -624,8 +624,12 @@ def num_outputs(self): def populate_inputs(self, init_fn, golden_inputs=[]): if len(golden_inputs) > 0: assert len(golden_inputs) == len(self.program["inputs"]) - for golden_input in golden_inputs: - self.input_tensors.append(golden_input) + + for index, input_fb in enumerate(self.program["inputs"]): + reshaped = torch.reshape( + golden_inputs[index], input_fb["desc"]["shape"] + ) + self.input_tensors.append(reshaped) else: for i in self.program["inputs"]: torch_tensor = init_fn( diff --git a/tools/explorer/CMakeLists.txt b/tools/explorer/CMakeLists.txt index 89f7337617..9e42e66516 100644 --- a/tools/explorer/CMakeLists.txt +++ b/tools/explorer/CMakeLists.txt @@ -17,7 +17,7 @@ ExternalProject_Add( add_custom_target(explorer COMMENT "Building tt-explorer... ${TTMLIR_BIN_DIR}" - COMMAND pip install $<$:-e> ${CMAKE_CURRENT_SOURCE_DIR}/tt_adapter + COMMAND pip install -e ${CMAKE_CURRENT_SOURCE_DIR}/tt_adapter COMMAND pip uninstall -y ai-edge-model-explorer COMMAND ${CMAKE_COMMAND} -E remove_directory ${CMAKE_CURRENT_SOURCE_DIR}/model-explorer/src/model-explorer/src/server/package/build COMMAND pip install --upgrade ${CMAKE_CURRENT_SOURCE_DIR}/model-explorer/src/model-explorer/src/server/package diff --git a/tools/explorer/tt_adapter/src/tt_adapter/main.py b/tools/explorer/tt_adapter/src/tt_adapter/main.py index 58de92b3c5..2bf84dff6e 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/main.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/main.py @@ -111,7 +111,7 @@ def convert( if model_path.endswith(".ttnn"): # Executing on a Flatbuffer so we should parse through that path module_str = utils.parse_flatbuffer_file( - model_path, at_pass="ConvertTTIRToTTNN" + model_path, at_pass="PRE-PIPELINE" ) assert module_str is not None, "Failed to parse flatbuffer" if module_str: diff --git a/tools/explorer/tt_adapter/src/tt_adapter/runner.py b/tools/explorer/tt_adapter/src/tt_adapter/runner.py index 6981f2d1a1..3d6c8ddee1 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/runner.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/runner.py @@ -5,6 +5,8 @@ import os import tempfile +from collections import defaultdict + # TODO(odjuricic) Cleaner to implement ttrt --quiet flag. # os.environ["TTRT_LOGGER_LEVEL"] = "ERROR" from ttrt import API as ttrt @@ -178,7 +180,7 @@ def compile_and_run(self, model_path, overrides_string): FLATBUFFER = True # Write the TTIR from this file into a temporary file to run through the compiler ttir_module_str = utils.parse_flatbuffer_file( - model_path, at_pass="Canonicalizer" + model_path, at_pass="PRE-PIPELINE" ) ttir_module_path = f"{model_path}_ttir.mlir" with open(ttir_module_path, "w+") as temp_module: @@ -232,6 +234,42 @@ def compile_and_run(self, model_path, overrides_string): # Need this flatbuffer file to inherit the golden data golden_map = utils.golden_map_from_flatbuffer(model_path) + # need to parse this golden_map + golden_data = defaultdict(list) + rendered_golden_map = {} + + for entry in golden_map: + data = entry["value"] + # Turn this into a Torch Tensor to easily format it for the GoldenMap + # data is a uint8_t buffer type that contains the data in the format of dtype + # We will need to render this data as a buffer reference for the create_golden_tensor function + + import array + + # B is unsigned char in the array library + # This will parse the data as a 1D Buffer of uint8_t, exactly the pointer type expected by create_golden_tensor + data_arr = array.array("B", data["data"]) + golden_data["data_arrs"].append(data_arr) + # Weird keepalive measure for the GoldenData...? + + golden_data["keys"].append(entry["key"]) + golden_data["names"].append(data["name"]) + golden_data["shapes"].append(data["shape"]) + golden_data["strides"].append(data["stride"]) + golden_data["dtypes"].append(passes.lookup_dtype(data["dtype"])) + golden_data["ptrs"].append(data_arr.buffer_info()[0]) + + # Create the golden map using Pybound creator + rendered_golden_map = passes.create_golden_map( + golden_data["keys"], + golden_data["names"], + golden_data["shapes"], + golden_data["strides"], + golden_data["dtypes"], + golden_data["ptrs"], + len(golden_map), + ) + # Get module from file with open(ttnn_ir_file, "r") as f: ttnn_module = utils.parse_mlir_str(f.read()) @@ -250,7 +288,10 @@ def compile_and_run(self, model_path, overrides_string): # Run through pybound translation so we can pass golden_map try: if golden_map: - passes.ttnn_to_flatbuffer_file(ttnn_module, flatbuffer_file, golden_map) + module_log = passes.ModuleLog() + passes.ttnn_to_flatbuffer_file( + ttnn_module, flatbuffer_file, rendered_golden_map, module_log + ) else: passes.ttnn_to_flatbuffer_file(ttnn_module, flatbuffer_file) except: diff --git a/tools/explorer/tt_adapter/src/tt_adapter/utils.py b/tools/explorer/tt_adapter/src/tt_adapter/utils.py index 69a257e978..c196f8ac62 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/utils.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/utils.py @@ -3,6 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import ttmlir from dataclasses import make_dataclass, is_dataclass, asdict +from collections import defaultdict import importlib import logging @@ -51,9 +52,11 @@ def parse_flatbuffer_file(fb_path, at_pass=None, program=0): return None cached_modules = debug_info["mlir_stages"] - for (pass_name, module) in cached_modules: - if pass_name == at_pass: - return module + # This is a dict {name: ..., source: ...} + for module in cached_modules: + if module["name"] == at_pass: + return module["source"] + print(module) logging.error("at_pass=%s not found in Flatbuffer.", at_pass) return None @@ -65,7 +68,7 @@ def golden_map_from_flatbuffer(fb_path, program=0): logging.error( "TTRT is not installed in Python Environment, unable to parse Flatbuffer." ) - return None + return [] from ttrt.common.util import Binary, Logger, FileManager logger = Logger() @@ -79,30 +82,15 @@ def golden_map_from_flatbuffer(fb_path, program=0): debug_info = fbb_dict["programs"][program]["debug_info"] except: logging.error("Flatbuffer does not contain DebugInfo on Program %d", program) - return None + return [] if "golden_info" not in debug_info: logging.error("Flatbuffer does not contain Golden Data.") - return None + return [] golden_map = debug_info["golden_info"]["golden_map"] - rendered_golden_map = {} - - # Create a np array from the data: - data_tensor = torch.Tensor(data["data"]) - - for entry in golden_map: - data = entry["value"] - tensor = ttmlir.passes.create_golden_tensor( - data["name"], - data["shape"], - data["strides"], - ttmlir.passes.lookup_dtype(data["dtype"]), - data_tensor.data_ptr, - ) - rendered_golden_map[entry["key"]] = tensor - return rendered_golden_map + return golden_map def to_dataclass(obj: dict, dc_name: str = "tempClass"): From 12c4c74031e5b259fabb3daf6e9835fb43e8c064 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 29 Jan 2025 21:51:21 +0000 Subject: [PATCH 20/40] Removed redundant print --- runtime/tools/python/ttrt/common/run.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/runtime/tools/python/ttrt/common/run.py b/runtime/tools/python/ttrt/common/run.py index 00f0972231..624610bc42 100644 --- a/runtime/tools/python/ttrt/common/run.py +++ b/runtime/tools/python/ttrt/common/run.py @@ -473,12 +473,6 @@ def _execute(binaries): golden_tensor_torch = torch.frombuffer( golden_tensor, dtype=dtype ) - - print( - f"Data from golden_tensor {i}", - golden_tensor_torch, - ) - golden_inputs.append(golden_tensor_torch) program.populate_inputs( From 8b29e8dd21ba50b81e1fa95db0c924c30c3582bf Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 29 Jan 2025 21:52:12 +0000 Subject: [PATCH 21/40] Removed redundant loc formatting --- runtime/tools/python/ttrt/common/callback.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/runtime/tools/python/ttrt/common/callback.py b/runtime/tools/python/ttrt/common/callback.py index fdbd404283..93a9af267b 100644 --- a/runtime/tools/python/ttrt/common/callback.py +++ b/runtime/tools/python/ttrt/common/callback.py @@ -204,8 +204,7 @@ def golden(callback_runtime_config, binary, program_context, op_context): loc = ttrt.runtime.get_op_loc_info(op_context) - op_golden_tensor = binary.get_debug_info_golden(loc.replace("\\", "")) - logging.info("Trying to find loc: %s. USING: %s", loc, loc.replace("\\", "")) + op_golden_tensor = binary.get_debug_info_golden(loc) if op_golden_tensor is None: logging.debug("Golden tensor is None - skipping golden comparison") From 784df87818b7cb865a75ed2bf060ea85b8c5b189 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 30 Jan 2025 17:45:07 +0000 Subject: [PATCH 22/40] Removed Golden Opaque types --- python/Passes.cpp | 33 +------------------ .../tt_adapter/src/tt_adapter/runner.py | 30 +++++------------ 2 files changed, 10 insertions(+), 53 deletions(-) diff --git a/python/Passes.cpp b/python/Passes.cpp index 4ad9ac731a..6119217e6c 100644 --- a/python/Passes.cpp +++ b/python/Passes.cpp @@ -9,12 +9,9 @@ #include "ttmlir/Target/TTMetal/TTMetalToFlatbuffer.h" #include "ttmlir/Target/TTNN/TTNNToFlatbuffer.h" #include -#include PYBIND11_MAKE_OPAQUE(std::shared_ptr); PYBIND11_MAKE_OPAQUE(std::vector>); -PYBIND11_MAKE_OPAQUE(mlir::tt::GoldenTensor); -PYBIND11_MAKE_OPAQUE(std::unordered_map); namespace mlir::tt::ttnn { void registerTTNNToFlatbuffer(); @@ -192,8 +189,7 @@ void populatePassesModule(py::module &m) { filepath); } }, - py::arg("module"), py::arg("filepath"), - py::arg("goldenMap").noconvert() = py::dict(), + py::arg("module"), py::arg("filepath"), py::arg("goldenMap") = py::dict(), py::arg("moduleCache") = py::list()); m.def("ttmetal_to_flatbuffer_file", @@ -249,33 +245,6 @@ void populatePassesModule(py::module &m) { reinterpret_cast(ptr)); }); - // Opaquely bind the GoldenMapTYpe to prevent conversions and issues in - // copying - py::bind_map>( - m, "GoldenMapType"); - - m.def( - "create_golden_map", - [](std::vector keys, std::vector names, - std::vector> shapes, - std::vector> strides, - std::vector<::tt::target::DataType> dtypes, - std::vector ptrs, int numTensors) { - // Create a Golden Map entirely in C++ to prevent data corruption when - // storing to module - std::unordered_map goldenMap; - - for (int i = 0; i < numTensors; i++) { - auto tensor = - mlir::tt::GoldenTensor(names[i], shapes[i], strides[i], dtypes[i], - reinterpret_cast(ptrs[i])); - goldenMap.emplace(keys[i], tensor); - } - - return goldenMap; - }, - py::return_value_policy::move); - py::class_>>(m, "ModuleLog") .def(py::init<>()) .def("to_list", diff --git a/tools/explorer/tt_adapter/src/tt_adapter/runner.py b/tools/explorer/tt_adapter/src/tt_adapter/runner.py index 3d6c8ddee1..d2a9d30453 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/runner.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/runner.py @@ -5,8 +5,6 @@ import os import tempfile -from collections import defaultdict - # TODO(odjuricic) Cleaner to implement ttrt --quiet flag. # os.environ["TTRT_LOGGER_LEVEL"] = "ERROR" from ttrt import API as ttrt @@ -235,7 +233,7 @@ def compile_and_run(self, model_path, overrides_string): # Need this flatbuffer file to inherit the golden data golden_map = utils.golden_map_from_flatbuffer(model_path) # need to parse this golden_map - golden_data = defaultdict(list) + kept_alive_data_arrs = [] rendered_golden_map = {} for entry in golden_map: @@ -249,26 +247,16 @@ def compile_and_run(self, model_path, overrides_string): # B is unsigned char in the array library # This will parse the data as a 1D Buffer of uint8_t, exactly the pointer type expected by create_golden_tensor data_arr = array.array("B", data["data"]) - golden_data["data_arrs"].append(data_arr) + kept_alive_data_arrs.append(data_arr) # Weird keepalive measure for the GoldenData...? - golden_data["keys"].append(entry["key"]) - golden_data["names"].append(data["name"]) - golden_data["shapes"].append(data["shape"]) - golden_data["strides"].append(data["stride"]) - golden_data["dtypes"].append(passes.lookup_dtype(data["dtype"])) - golden_data["ptrs"].append(data_arr.buffer_info()[0]) - - # Create the golden map using Pybound creator - rendered_golden_map = passes.create_golden_map( - golden_data["keys"], - golden_data["names"], - golden_data["shapes"], - golden_data["strides"], - golden_data["dtypes"], - golden_data["ptrs"], - len(golden_map), - ) + rendered_golden_map[entry["key"]] = passes.create_golden_tensor( + data["name"], + data["shape"], + data["stride"], + passes.lookup_dtype(data["dtype"]), + data_arr.buffer_info()[0], + ) # Get module from file with open(ttnn_ir_file, "r") as f: From b21279aa25cf499b5ef4f6f63a60bbc269502ec1 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 30 Jan 2025 19:57:57 +0000 Subject: [PATCH 23/40] Separated Logic between FB and MLIR loading --- .../tt_adapter/src/tt_adapter/runner.py | 117 +++++++++--------- 1 file changed, 59 insertions(+), 58 deletions(-) diff --git a/tools/explorer/tt_adapter/src/tt_adapter/runner.py b/tools/explorer/tt_adapter/src/tt_adapter/runner.py index d2a9d30453..94df79fe94 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/runner.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/runner.py @@ -231,66 +231,67 @@ def compile_and_run(self, model_path, overrides_string): ############################## Translate ################################# # Need this flatbuffer file to inherit the golden data - golden_map = utils.golden_map_from_flatbuffer(model_path) - # need to parse this golden_map - kept_alive_data_arrs = [] - rendered_golden_map = {} - - for entry in golden_map: - data = entry["value"] - # Turn this into a Torch Tensor to easily format it for the GoldenMap - # data is a uint8_t buffer type that contains the data in the format of dtype - # We will need to render this data as a buffer reference for the create_golden_tensor function - - import array - - # B is unsigned char in the array library - # This will parse the data as a 1D Buffer of uint8_t, exactly the pointer type expected by create_golden_tensor - data_arr = array.array("B", data["data"]) - kept_alive_data_arrs.append(data_arr) - # Weird keepalive measure for the GoldenData...? - - rendered_golden_map[entry["key"]] = passes.create_golden_tensor( - data["name"], - data["shape"], - data["stride"], - passes.lookup_dtype(data["dtype"]), - data_arr.buffer_info()[0], - ) - - # Get module from file - with open(ttnn_ir_file, "r") as f: - ttnn_module = utils.parse_mlir_str(f.read()) - - # Don't run the subprocess command anymore - # to_flatbuffer_command = [ - # f"{self._build_dir}/bin/ttmlir-translate", - # "--ttnn-to-flatbuffer", - # ttnn_ir_file, - # "-o", - # flatbuffer_file, - # ] + if FLATBUFFER: + golden_map = utils.golden_map_from_flatbuffer(model_path) + # need to parse this golden_map + kept_alive_data_arrs = [] + rendered_golden_map = {} + + for entry in golden_map: + data = entry["value"] + # Turn this into a Torch Tensor to easily format it for the GoldenMap + # data is a uint8_t buffer type that contains the data in the format of dtype + # We will need to render this data as a buffer reference for the create_golden_tensor function + import array + + # B is unsigned char in the array library + # This will parse the data as a 1D Buffer of uint8_t, exactly the pointer type expected by create_golden_tensor + data_arr = array.array("B", data["data"]) + kept_alive_data_arrs.append(data_arr) + # Weird keepalive measure for the GoldenData...? + + rendered_golden_map[entry["key"]] = passes.create_golden_tensor( + data["name"], + data["shape"], + data["stride"], + passes.lookup_dtype(data["dtype"]), + data_arr.buffer_info()[0], + ) - self.log("Running TTNN to Flatbuffer File") + # Get module from file + with open(ttnn_ir_file, "r") as f: + ttnn_module = utils.parse_mlir_str(f.read()) + + self.log("Running TTNN to Flatbuffer File") + + # Run through pybound translation so we can pass golden_map + try: + if golden_map: + module_log = passes.ModuleLog() + passes.ttnn_to_flatbuffer_file( + ttnn_module, flatbuffer_file, rendered_golden_map, module_log + ) + else: + passes.ttnn_to_flatbuffer_file(ttnn_module, flatbuffer_file) + except: + self.log("Error while running TTNN to Flatbuffer File") + raise ExplorerRunException() + else: + # Translate to Flatbuffer normally. + to_flatbuffer_command = [ + f"{self._build_dir}/bin/ttmlir-translate", + "--ttnn-to-flatbuffer", + ttnn_ir_file, + "-o", + flatbuffer_file, + ] + + translate_process = self.run_in_subprocess(to_flatbuffer_command) + if translate_process.returncode != 0: + error = "Error while running TTNN to Flatbuffer File" + self.log(error) + raise ExplorerRunException(error) - # Run through pybound translation so we can pass golden_map - try: - if golden_map: - module_log = passes.ModuleLog() - passes.ttnn_to_flatbuffer_file( - ttnn_module, flatbuffer_file, rendered_golden_map, module_log - ) - else: - passes.ttnn_to_flatbuffer_file(ttnn_module, flatbuffer_file) - except: - self.log("Error while running TTNN to Flatbuffer File") - raise ExplorerRunException() - - # translate_process = self.run_in_subprocess(to_flatbuffer_command) - # if translate_process.returncode != 0: - # error = "Error while running TTNN to Flatbuffer File" - # self.log(error) - # raise ExplorerRunException(error) self.progress = 30 ############################## TTRT Perf ################################# From 3723de5a2f8ece2dbb6868d4f5e4f18f7a2e8850 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 30 Jan 2025 21:41:35 +0000 Subject: [PATCH 24/40] Removed PassedModuleCache --- include/ttmlir/Conversion/Passes.h | 31 +++++----------- python/Passes.cpp | 36 +++++++++---------- python/test_infra/test_utils.py | 7 ++-- .../tt_adapter/src/tt_adapter/runner.py | 13 ++++--- 4 files changed, 35 insertions(+), 52 deletions(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index 29919d74d4..bfbf291088 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -32,27 +32,15 @@ struct MLIRModuleLogger { std::vector> moduleCache; void attachContext(mlir::MLIRContext *ctx, - std::vector passNamesToCache = {}, - std::vector> - *passedModuleCache = nullptr) { + std::vector passNamesToCache = {}) { context = ctx; - context->registerActionHandler([this, passNamesToCache, passedModuleCache]( + context->registerActionHandler([this, passNamesToCache]( llvm::function_ref transform, const mlir::tracing::Action &action) { // Also might make sense to store the _FIRST_ module. Or the module before // it was sent through the pipeline. - - if (passedModuleCache != nullptr and passedModuleCache->empty()) { - // In Python Env so we have to add it ot the passedCache - std::string passName = "PRE-PIPELINE", outString; - llvm::raw_string_ostream os(outString); - mlir::OpPrintingFlags flags; - flags.enableDebugInfo(); - action.getContextIRUnits()[0].print(os, flags); - os.flush(); - passedModuleCache->emplace_back(passName, outString); - } else if (passedModuleCache == nullptr and moduleCache.empty()) { + if (moduleCache.empty()) { // Add it to the current Cache. std::string passName = "PRE-PIPELINE", outString; llvm::raw_string_ostream os(outString); @@ -84,6 +72,8 @@ struct MLIRModuleLogger { passAction.getOp()->print(os, flags); os.flush(); + llvm::outs() << "Pass: " << passName << " has correct TypeID\n"; + this->moduleCache.emplace_back(passName, outString); } } else if (action.getTag() == @@ -125,6 +115,9 @@ struct MLIRModuleLogger { } } + llvm::outs() << "Pass: " << passName + << " was not registered correctly.\n"; + // Now save the ModuleOp from the IRUnits, for PassExecution there will // always be only 1 IR unit. if (passNamesToCache.empty() or @@ -137,13 +130,7 @@ struct MLIRModuleLogger { action.getContextIRUnits()[0].print(os, flags); os.flush(); - // Python passes do not maintain the sufficient context to actually - // update moduleCache, one has to be passed You can pass this in - // Python using the ModuleLog class in the `passes` module. See - // python/Passes.cpp for usage. - if (passedModuleCache != nullptr) { - passedModuleCache->emplace_back(passName, outString); - } + this->moduleCache.emplace_back(passName, outString); } } }); diff --git a/python/Passes.cpp b/python/Passes.cpp index 6119217e6c..fcba3a2728 100644 --- a/python/Passes.cpp +++ b/python/Passes.cpp @@ -9,6 +9,7 @@ #include "ttmlir/Target/TTMetal/TTMetalToFlatbuffer.h" #include "ttmlir/Target/TTNN/TTNNToFlatbuffer.h" #include +#include PYBIND11_MAKE_OPAQUE(std::shared_ptr); PYBIND11_MAKE_OPAQUE(std::vector>); @@ -166,6 +167,11 @@ void populatePassesModule(py::module &m) { // NOLINTEND }); + // This binds the vector into an interfaceable object in python and also an + // opaquely passed one into other functions. + py::bind_vector>>( + m, "ModuleLog"); + m.def( "ttnn_to_flatbuffer_file", [](MlirModule module, std::string &filepath, @@ -190,7 +196,8 @@ void populatePassesModule(py::module &m) { } }, py::arg("module"), py::arg("filepath"), py::arg("goldenMap") = py::dict(), - py::arg("moduleCache") = py::list()); + py::arg("moduleCache") = + std::vector>()); m.def("ttmetal_to_flatbuffer_file", [](MlirModule module, std::string &filepath, @@ -245,30 +252,19 @@ void populatePassesModule(py::module &m) { reinterpret_cast(ptr)); }); - py::class_>>(m, "ModuleLog") - .def(py::init<>()) - .def("to_list", - [](const std::vector> &vec) { - py::list list; - for (const auto &pair : vec) { - list.append(py::make_tuple(pair.first, pair.second)); - } - return list; - }); - - py::class_(m, "MLIRModuleLogger") + py::class_>(m, "MLIRModuleLogger") .def(py::init<>()) .def( "attach_context", - [](mlir::tt::MLIRModuleLogger &self, MlirContext ctx, - std::vector> &moduleCache, + [](std::shared_ptr &self, MlirContext ctx, std::vector &passnames_to_cache) { - self.attachContext(unwrap(ctx), passnames_to_cache, &moduleCache); + self->attachContext(unwrap(ctx), passnames_to_cache); }, - py::arg("ctx"), py::arg("moduleCache").noconvert(), - py::arg("passnames_to_cache") = py::list()) + py::arg("ctx"), py::arg("passnames_to_cache") = py::list()) .def_property_readonly( - "module_log", - [](mlir::tt::MLIRModuleLogger &self) { return self.moduleCache; }); + "module_log", [](std::shared_ptr &self) { + return self->moduleCache; + }); } } // namespace mlir::ttmlir::python diff --git a/python/test_infra/test_utils.py b/python/test_infra/test_utils.py index 4efea1c7ae..73b6b3ee0c 100644 --- a/python/test_infra/test_utils.py +++ b/python/test_infra/test_utils.py @@ -346,10 +346,11 @@ def wrapper(): test_fn, inputs_shapes, inputs_types ) module_logger = MLIRModuleLogger() - module_log = ModuleLog() - module_logger.attach_context(module.context, module_log) + module_logger.attach_context(module.context) module = ttir_to_ttnn(module, builder, test_base + ".mlir") - ttnn_to_flatbuffer(module, builder, test_base + ".ttnn", module_log) + ttnn_to_flatbuffer( + module, builder, test_base + ".ttnn", module_logger.module_log + ) return wrapper diff --git a/tools/explorer/tt_adapter/src/tt_adapter/runner.py b/tools/explorer/tt_adapter/src/tt_adapter/runner.py index 94df79fe94..8086b577a8 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/runner.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/runner.py @@ -267,9 +267,8 @@ def compile_and_run(self, model_path, overrides_string): # Run through pybound translation so we can pass golden_map try: if golden_map: - module_log = passes.ModuleLog() passes.ttnn_to_flatbuffer_file( - ttnn_module, flatbuffer_file, rendered_golden_map, module_log + ttnn_module, flatbuffer_file, rendered_golden_map ) else: passes.ttnn_to_flatbuffer_file(ttnn_module, flatbuffer_file) @@ -286,11 +285,11 @@ def compile_and_run(self, model_path, overrides_string): flatbuffer_file, ] - translate_process = self.run_in_subprocess(to_flatbuffer_command) - if translate_process.returncode != 0: - error = "Error while running TTNN to Flatbuffer File" - self.log(error) - raise ExplorerRunException(error) + translate_process = self.run_in_subprocess(to_flatbuffer_command) + if translate_process.returncode != 0: + error = "Error while running TTNN to Flatbuffer File" + self.log(error) + raise ExplorerRunException(error) self.progress = 30 From 95c0f5c54d033a95a68817c0762f6a3721b6ee14 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 31 Jan 2025 19:38:20 +0000 Subject: [PATCH 25/40] Fixed TypeID discrepancy - Thanks Nick --- include/ttmlir/Conversion/Passes.h | 104 +---------------------------- lib/RegisterAll.cpp | 47 +++++++++++++ 2 files changed, 48 insertions(+), 103 deletions(-) diff --git a/include/ttmlir/Conversion/Passes.h b/include/ttmlir/Conversion/Passes.h index bfbf291088..7daf8cfe26 100644 --- a/include/ttmlir/Conversion/Passes.h +++ b/include/ttmlir/Conversion/Passes.h @@ -32,109 +32,7 @@ struct MLIRModuleLogger { std::vector> moduleCache; void attachContext(mlir::MLIRContext *ctx, - std::vector passNamesToCache = {}) { - context = ctx; - - context->registerActionHandler([this, passNamesToCache]( - llvm::function_ref transform, - const mlir::tracing::Action &action) { - // Also might make sense to store the _FIRST_ module. Or the module before - // it was sent through the pipeline. - if (moduleCache.empty()) { - // Add it to the current Cache. - std::string passName = "PRE-PIPELINE", outString; - llvm::raw_string_ostream os(outString); - mlir::OpPrintingFlags flags; - flags.enableDebugInfo(); - action.getContextIRUnits()[0].print(os, flags); - os.flush(); - moduleCache.emplace_back(passName, outString); - } - - // Might make more sense to hold the module after a transformation has - // occured. - transform(); // Run the transformation pass. - - // Now save the module if it should be Cached. - if (mlir::isa(action)) { - auto passAction = mlir::cast(action); - // A Pass action has occured, need to store the previous module - // before transform is completed. - std::string passName = passAction.getPass().getName().str(); - - if (passNamesToCache.empty() or - std::find(passNamesToCache.begin(), passNamesToCache.end(), - passName) != passNamesToCache.end()) { - std::string outString; - llvm::raw_string_ostream os(outString); - mlir::OpPrintingFlags flags; - flags.enableDebugInfo(); - passAction.getOp()->print(os, flags); - os.flush(); - - llvm::outs() << "Pass: " << passName << " has correct TypeID\n"; - - this->moduleCache.emplace_back(passName, outString); - } - } else if (action.getTag() == - "pass-execution") { // Tag will always be pass-execution but - // unable to cast - // This block was made considering that PassActions are weirdly not - // registered when run through python We can String parse the printed - // PassAction to determine the passName, The Op will be part of the - // IRUnits, and we can extract it - - // The printed OP looks like: - // `pass-execution` running `TTNNDeallocate` on Operation - // `builtin.module` So we can filter for the `s and get the PassName in - // between these. There will always only be 1R Unit and it is the - // ModuleOp. - - std::string passOutput, passName = ""; - llvm::raw_string_ostream passOut(passOutput); - action.print(passOut); - passOut.flush(); - - int backTickCount = 0; - const int BACKTICK_BEFORE_PASS_NAME = 3, BACKTICK_AFTER_PASS_NAME = 4; - for (const auto &c : passOutput) { - if (c == '`') { - backTickCount++; - } - - if (backTickCount == - BACKTICK_BEFORE_PASS_NAME) { // This is the specific backTickCount - // that - // prefixes the passName - passName += c; - } else if (backTickCount >= - BACKTICK_AFTER_PASS_NAME) { // Specific count after - // passName - // is complete. - break; - } - } - - llvm::outs() << "Pass: " << passName - << " was not registered correctly.\n"; - - // Now save the ModuleOp from the IRUnits, for PassExecution there will - // always be only 1 IR unit. - if (passNamesToCache.empty() or - std::find(passNamesToCache.begin(), passNamesToCache.end(), - passName) != passNamesToCache.end()) { - std::string outString; - llvm::raw_string_ostream os(outString); - mlir::OpPrintingFlags flags; - flags.enableDebugInfo(); - action.getContextIRUnits()[0].print(os, flags); - os.flush(); - - this->moduleCache.emplace_back(passName, outString); - } - } - }); - } + std::vector passNamesToCache); }; } // namespace mlir::tt diff --git a/lib/RegisterAll.cpp b/lib/RegisterAll.cpp index 4f66a2bccf..963385e82e 100644 --- a/lib/RegisterAll.cpp +++ b/lib/RegisterAll.cpp @@ -83,3 +83,50 @@ void mlir::tt::registerAllPasses() { mlir::tt::ttnn::registerTTNNPipelines(); mlir::tt::ttmetal::registerTTMetalPipelines(); } + +void mlir::tt::MLIRModuleLogger::attachContext( + mlir::MLIRContext *ctx, std::vector passNamesToCache = {}) { + context = ctx; + + context->registerActionHandler( + [this, passNamesToCache](llvm::function_ref transform, + const mlir::tracing::Action &action) { + // Also might make sense to store the _FIRST_ module. Or the module + // before it was sent through the pipeline. + if (moduleCache.empty()) { + // Add it to the current Cache. + std::string passName = "PRE-PIPELINE", outString; + llvm::raw_string_ostream os(outString); + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(); + action.getContextIRUnits()[0].print(os, flags); + os.flush(); + moduleCache.emplace_back(passName, outString); + } + + // Might make more sense to hold the module after a transformation has + // occured. + transform(); // Run the transformation pass. + + // Now save the module if it should be Cached. + if (mlir::isa(action)) { + auto passAction = mlir::cast(action); + // A Pass action has occured, need to store the previous module + // before transform is completed. + std::string passName = passAction.getPass().getName().str(); + + if (passNamesToCache.empty() or + std::find(passNamesToCache.begin(), passNamesToCache.end(), + passName) != passNamesToCache.end()) { + std::string outString; + llvm::raw_string_ostream os(outString); + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(); + passAction.getOp()->print(os, flags); + os.flush(); + + this->moduleCache.emplace_back(passName, outString); + } + } + }); +} From c42ff1887de2597270ccef66e55e9bdd7234d4cb Mon Sep 17 00:00:00 2001 From: Vraj Prajapati Date: Tue, 4 Feb 2025 19:47:27 +0000 Subject: [PATCH 26/40] Removed redundant debug prints & assert --- tools/explorer/tt_adapter/src/tt_adapter/main.py | 4 +++- tools/explorer/tt_adapter/src/tt_adapter/utils.py | 1 - 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/explorer/tt_adapter/src/tt_adapter/main.py b/tools/explorer/tt_adapter/src/tt_adapter/main.py index 2bf84dff6e..e519365f47 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/main.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/main.py @@ -113,9 +113,11 @@ def convert( module_str = utils.parse_flatbuffer_file( model_path, at_pass="PRE-PIPELINE" ) - assert module_str is not None, "Failed to parse flatbuffer" + if module_str: module = utils.parse_mlir_str(module_str) + elif module_str is None: + raise Exception("Failed to parse flatbuffer") else: with open(model_path, "r") as model_file: module = utils.parse_mlir_str(model_file.read()) diff --git a/tools/explorer/tt_adapter/src/tt_adapter/utils.py b/tools/explorer/tt_adapter/src/tt_adapter/utils.py index c196f8ac62..69fab1c623 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/utils.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/utils.py @@ -56,7 +56,6 @@ def parse_flatbuffer_file(fb_path, at_pass=None, program=0): for module in cached_modules: if module["name"] == at_pass: return module["source"] - print(module) logging.error("at_pass=%s not found in Flatbuffer.", at_pass) return None From 5acfac26f4b2338ec7196bb3602bdc92894df217 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 5 Feb 2025 20:10:35 +0000 Subject: [PATCH 27/40] Added Variable Path in test_ttir_ops --- python/test_infra/test_utils.py | 27 +++++++++++++++++++++++++++ test/python/golden/test_ttir_ops.py | 17 ++++++++++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/python/test_infra/test_utils.py b/python/test_infra/test_utils.py index e28214c813..1b46dd0bd7 100644 --- a/python/test_infra/test_utils.py +++ b/python/test_infra/test_utils.py @@ -20,6 +20,8 @@ TT_MLIR_HOME = os.environ.get("TT_MLIR_HOME", "") +# Default output to the current directory from where this module is being invoked +OUTPUT_PATH = "" # ----- Static helpers used in this file only ----- @@ -30,6 +32,25 @@ def _dump_module(module: Module) -> None: # ----- General Purpose Helpers - Could Be Used In Other Files ----- +def set_output_path(path): + global OUTPUT_PATH + if not os.path.exists(path): + raise ValueError(f"The provided path '{path}' is not a valid path.") + OUTPUT_PATH = path + + +def get_ttnn_path(filename): + ttnn_dir = os.path.join(OUTPUT_PATH, "ttnn") + if not os.path.exists(ttnn_dir): + os.makedirs(ttnn_dir) + return os.path.join(ttnn_dir, filename) + + +def get_ttmetal_path(filename): + ttmetal_dir = os.path.join(OUTPUT_PATH, "ttmetal") + if not os.path.exists(ttmetal_dir): + os.makedirs(ttmetal_dir) + return os.path.join(ttmetal_dir, filename) def compile_as_mlir_module( @@ -177,6 +198,7 @@ def ttir_to_ttnn( # Optionally dump to file. if dump_to_file: + output_file_name = get_ttnn_path(output_file_name) with open(output_file_name, "w") as f: f.write(str(module)) @@ -222,6 +244,7 @@ def ttir_to_ttmetal( # Optionally dump to file. if dump_to_file: + output_file_name = get_ttmetal_path(output_file_name) with open(output_file_name, "w") as f: f.write(str(module)) @@ -239,6 +262,8 @@ def ttnn_to_flatbuffer( """ # Convert to flatbuffer file. + # Take the output_file_name and prefix with the ttnn directory + output_file_name = get_ttnn_path(output_file_name) ttnn_to_flatbuffer_file(module, output_file_name, builder.get_golden_map()) print("`ttnn_to_flatbuffer_file` passed successfully.") @@ -255,6 +280,8 @@ def ttmetal_to_flatbuffer( """ # Convert to flatbuffer file. + # Take the output_file_name and prefix with ttm directory + output_file_name = get_ttmetal_path(output_file_name) ttmetal_to_flatbuffer_file(module, output_file_name, builder.get_golden_map()) print("`ttmetal_to_flatbuffer_file` passed successfully.") diff --git a/test/python/golden/test_ttir_ops.py b/test/python/golden/test_ttir_ops.py index 14fbd10b75..856d9871f0 100644 --- a/test/python/golden/test_ttir_ops.py +++ b/test/python/golden/test_ttir_ops.py @@ -7,7 +7,7 @@ import inspect import torch -from ttmlir.test_utils import compile_to_flatbuffer +from ttmlir.test_utils import compile_to_flatbuffer, set_output_path from ttmlir.ttir_builder import Operand, TTIRBuilder, Attribute @@ -410,6 +410,21 @@ def test_mnist( if __name__ == "__main__": + import argparse, os + + parser = argparse.ArgumentParser(description="Run TTIR tests") + parser.add_argument( + "--path", + type=str, + help="Optional output path for the flatbuffer. Creates path if supplied path doesn't exist", + ) + args = parser.parse_args() + + if args.path and os.path.exists(args.path): + if not os.path.exists(args.path): + os.makedirs(args.path) + set_output_path(args.path) + test_functions = inspect.getmembers( inspect.getmodule(inspect.currentframe()), inspect.isfunction ) From d86d7027e8354a1dbe35b1852be6468bd4ae2799 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 5 Feb 2025 20:35:33 +0000 Subject: [PATCH 28/40] Added TTNN Tests + Env Var for Explorer CI --- .github/workflows/build-and-test.yml | 3 ++- tools/explorer/test/run_tests.py | 11 +++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 230feeaacc..fde8bc97a5 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -636,7 +636,8 @@ jobs: shell: bash run: | source env/activate - export TT_EXPLORER_GENERATED_TEST_DIR=${{ steps.strings.outputs.build-output-dir }}/test/ttmlir/Silicon/TTNN + export TT_EXPLORER_GENERATED_MLIR_TEST_DIRS=${{ steps.strings.outputs.build-output-dir }}/test/ttmlir/Silicon/TTNN,${{ steps.strings.outputs.build-output-dir }}/test/python/golden/ttnn + export TT_EXPLORER_GENERATED_TTNN_TEST_DIRS=${{ steps.strings.outputs.build-output-dir }}/test/python/golden/ttnn pytest tools/explorer/test/run_tests.py # collect results diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index 9fea9ee9cd..93f064ce64 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -23,10 +23,13 @@ MNIST_SHARDING_PATH, ] -if "TT_EXPLORER_GENERATED_TEST_DIR" in os.environ: - TEST_LOAD_MODEL_PATHS.append( - os.environ["TT_EXPLORER_GENERATED_TEST_DIR"] + "/**/*.mlir" - ) +if "TT_EXPLORER_GENERATED_MLIR_TEST_DIRS" in os.environ: + for path in os.environ["TT_EXPLORER_GENERATED_MLIR_TEST_DIRS"].split(","): + TEST_LOAD_MODEL_PATHS.append(os.path.join(path, "/**/*.mlir")) + +if "TT_EXPLORER_GENERATED_TTNN_TEST_DIRS" in os.environ: + for path in os.environ["TT_EXPLORER_GENERATED_TTNN_TEST_DIRS"].split(","): + TEST_LOAD_MODEL_PATHS.append(os.path.join(path, "/**/*.ttnn")) def get_test_files(paths): From ac0edccbedbda2ae3c4c0f18d13a38c44fb04c72 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 5 Feb 2025 22:18:15 +0000 Subject: [PATCH 29/40] Added TTNN Tests into Explorer, fixed small bugs --- tools/explorer/test/run_tests.py | 28 +++------------------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index 93f064ce64..ad8b2e4c29 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -25,11 +25,12 @@ if "TT_EXPLORER_GENERATED_MLIR_TEST_DIRS" in os.environ: for path in os.environ["TT_EXPLORER_GENERATED_MLIR_TEST_DIRS"].split(","): - TEST_LOAD_MODEL_PATHS.append(os.path.join(path, "/**/*.mlir")) + TEST_LOAD_MODEL_PATHS.append(path + "/**/*.mlir") if "TT_EXPLORER_GENERATED_TTNN_TEST_DIRS" in os.environ: for path in os.environ["TT_EXPLORER_GENERATED_TTNN_TEST_DIRS"].split(","): - TEST_LOAD_MODEL_PATHS.append(os.path.join(path, "/**/*.ttnn")) + TEST_LOAD_MODEL_PATHS.append(path + "/**/*.ttnn") + TEST_EXECUTE_MODEL_PATHS.append(path + "/**/*.ttnn") def get_test_files(paths): @@ -39,22 +40,6 @@ def get_test_files(paths): return files -def execute_command(model_path, settings): - cmd = { - "extensionId": "tt_adapter", - "cmdId": "execute", - "modelPath": model_path, - "deleteAfterConversion": False, - "settings": settings, - } - - result = requests.post(COMMAND_URL, json=cmd) - assert result.ok - if "error" in result.json(): - print(result.json()) - assert False - - @pytest.fixture(scope="function", autouse=True) def start_server(request): """Start the model explorer server before running tests and stop it after.""" @@ -91,13 +76,6 @@ def server_shutdown(): request.addfinalizer(server_shutdown) -def get_test_files(paths): - files = [] - for path in paths: - files.extend(glob.glob(path)) - return files - - def send_command(command, model_path, settings={}): cmd = { "extensionId": "tt_adapter", From e7e9e950850995a8cb8a4c41cd278f964fbed75c Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 7 Feb 2025 17:30:05 +0000 Subject: [PATCH 30/40] Interim --- python/TTNNModule.cpp | 8 +++++++- tools/explorer/test/run_tests.py | 3 +-- tools/explorer/tt_adapter/src/tt_adapter/mlir.py | 16 +++++++++++++--- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/python/TTNNModule.cpp b/python/TTNNModule.cpp index 3610c1d211..d8d5ae66e7 100644 --- a/python/TTNNModule.cpp +++ b/python/TTNNModule.cpp @@ -56,7 +56,13 @@ void populateTTNNModule(py::module &m) { }) .def_property_readonly("value", [](tt::ttnn::TensorMemoryLayoutAttr self) { - return static_cast(self.getValue()); + llvm::outs() << "Before\n"; + auto layout = self.getValue(); + llvm::outs() << "Got the Layout\n"; + auto casted = static_cast(layout); + llvm::outs() << "Cast the Layout\n"; + + return casted; }); tt_attribute_class(m, "BufferTypeAttr") .def_static( diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index ad8b2e4c29..164804f878 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -14,9 +14,8 @@ PORT = 8002 COMMAND_URL = "http://" + HOST + ":" + str(PORT) + "/apipost/v1/send_command" TEST_LOAD_MODEL_PATHS = [ - "test/ttmlir/Dialect/TTNN/optimizer/mnist_sharding.mlir", "test/ttmlir/Explorer/**/*.mlir", - "test/ttmlir/Silicon/TTNN/**/*.mlir", + "test/ttmlir/Dialect/TTNN/**/*.mlir", ] MNIST_SHARDING_PATH = "test/ttmlir/Silicon/TTNN/n150/optimizer/mnist_sharding.mlir" TEST_EXECUTE_MODEL_PATHS = [ diff --git a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py index 86695e9773..7a273e74ac 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py @@ -305,14 +305,24 @@ def parse_memory_config(attr): value="x".join(map(str, memory_config.shard_spec.shard_shape.shape)), ) ) + + my_layout = memory_config.tensor_memory_layout + my_val = int(my_layout.value) + print(my_layout, my_val) + my_mlir_layout = ttnn.TensorMemoryLayout(my_val) + print(my_mlir_layout) + my_mlir_layout_repr = str(my_mlir_layout) + print(my_mlir_layout_repr) + result.append( graph_builder.KeyValue( key="tensor-memory-layout", - value=str( - ttnn.TensorMemoryLayout(memory_config.tensor_memory_layout.value) - ), + value=my_mlir_layout_repr, ) ) + + print("Made it here!") + return result From f5fb30649b38ca44e0088b15bec1ec9621cb8df8 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 14 Feb 2025 17:42:10 +0000 Subject: [PATCH 31/40] Added Accuracy Overlay + Some Tests --- tools/explorer/test/run_tests.py | 8 +-- .../tt_adapter/src/tt_adapter/main.py | 17 +++++-- .../tt_adapter/src/tt_adapter/mlir.py | 50 +++++++++++++++---- .../tt_adapter/src/tt_adapter/runner.py | 12 +++++ 4 files changed, 70 insertions(+), 17 deletions(-) diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index 164804f878..5f23604a7f 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -15,7 +15,7 @@ COMMAND_URL = "http://" + HOST + ":" + str(PORT) + "/apipost/v1/send_command" TEST_LOAD_MODEL_PATHS = [ "test/ttmlir/Explorer/**/*.mlir", - "test/ttmlir/Dialect/TTNN/**/*.mlir", + "test/ttmlir/Silicon/TTNN/n150/perf/**/*.mlir", ] MNIST_SHARDING_PATH = "test/ttmlir/Silicon/TTNN/n150/optimizer/mnist_sharding.mlir" TEST_EXECUTE_MODEL_PATHS = [ @@ -137,7 +137,7 @@ def test_load_model(model_path): @pytest.mark.parametrize("model_path", get_test_files(TEST_EXECUTE_MODEL_PATHS)) def test_execute_model(model_path): execute_command_and_wait( - model_path, {"optimizationPolicy": "DF Sharding"}, timeout=300 + model_path, {"optimizationPolicy": "Optimizer Disabled"}, timeout=300 ) convert_command_and_assert(model_path) @@ -151,10 +151,10 @@ def test_execute_mnist_l1_interleaved(): convert_command_and_assert(MNIST_SHARDING_PATH) -def test_execute_mnist_optimizer_disabled(): +def test_execute_mnist_df_sharding(): execute_command_and_wait( MNIST_SHARDING_PATH, - {"optimizationPolicy": "Optimizer Disabled"}, + {"optimizationPolicy": "DF Sharding"}, timeout=300, ) convert_command_and_assert(MNIST_SHARDING_PATH) diff --git a/tools/explorer/tt_adapter/src/tt_adapter/main.py b/tools/explorer/tt_adapter/src/tt_adapter/main.py index e519365f47..3713f22010 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/main.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/main.py @@ -95,15 +95,24 @@ def convert( print(f"Using optimized model: {optimized_model_path}") # Get performance results. perf_trace = self.model_runner.get_perf_trace(model_path) + golden_results = self.model_runner.get_golden_results(model_path) with open(optimized_model_path, "r") as model_file: module = utils.parse_mlir_str(model_file.read()) # Convert TTIR to Model Explorer Graphs and Display/Return - graph, perf_data = mlir.build_graph(module, perf_trace) + graph, perf_data, accuracy_data = mlir.build_graph( + module, perf_trace, golden_results + ) + overlays = {} if perf_data: - # TODO(odjuricic) We should replace the perf_data with overlays once this is fixed on FE. - graph = utils.add_to_dataclass(graph, "perf_data", perf_data.graphsData) + overlays["perf_data"] = perf_data.graphsData + + if accuracy_data: + overlays["accuracy_data"] = accuracy_data.graphsData + + if overlays: + graph = utils.add_to_dataclass(graph, "overlays", overlays) if overrides := self.model_runner.get_overrides(model_path): graph = utils.add_to_dataclass(graph, "overrides", overrides) @@ -123,7 +132,7 @@ def convert( module = utils.parse_mlir_str(model_file.read()) # Convert TTIR to Model Explorer Graphs and Display/Return - graph, _ = mlir.build_graph(module) + graph, _, _ = mlir.build_graph(module) return {"graphs": [graph]} diff --git a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py index 7a273e74ac..f37090b938 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py @@ -308,11 +308,8 @@ def parse_memory_config(attr): my_layout = memory_config.tensor_memory_layout my_val = int(my_layout.value) - print(my_layout, my_val) my_mlir_layout = ttnn.TensorMemoryLayout(my_val) - print(my_mlir_layout) my_mlir_layout_repr = str(my_mlir_layout) - print(my_mlir_layout_repr) result.append( graph_builder.KeyValue( @@ -321,8 +318,6 @@ def parse_memory_config(attr): ) ) - print("Made it here!") - return result @@ -588,7 +583,7 @@ def make_constant_node(self, constant_name): ] -def build_graph(module, perf_trace=None): +def build_graph(module, perf_trace=None, golden_results=None): output_connections = defaultdict(int) graph = graph_builder.Graph(id="tt-graph") @@ -604,6 +599,17 @@ def build_graph(module, perf_trace=None): if loc: loc_to_perf[loc] = row["DEVICE FW DURATION [ns]"] + # Parse Golden Results for Overlay + accuracy_node_data = {} + loc_to_accuracy = {} + if golden_results is not None: + for loc, res in golden_results.items(): + loc = parse_loc_string(loc) + assert loc not in loc_to_accuracy + if loc: + # Store the full result here, just need to parse the loc accordingly= + loc_to_accuracy[loc] = res + module_op = OpHandler(module.operation) module_attrs = module_op.get_attributes() module_attrs = dict((attr.key, attr.value) for attr in module_attrs) @@ -628,6 +634,17 @@ def build_graph(module, perf_trace=None): loc_to_perf[operation.named_location] ) + if ( + operation.named_location in loc_to_accuracy + and operation.op.name not in EMPTY_OPS + ): + accuracy_node_data[ + operation.id + ] = node_data_builder.NodeDataResult( + loc_to_accuracy[operation.named_location]["actual_pcc"] + - loc_to_accuracy[operation.named_location]["expected_pcc"] + ) + if op.name not in FILTERED_OPS and op.name in EMPTY_OPS: append_later.append(graph_node) elif op.name not in FILTERED_OPS: @@ -713,7 +730,7 @@ def build_graph(module, perf_trace=None): output_connections[source_node.id] += 1 # Add performance data to the graph color overlay, if it exists - overlay_data = None + perf_data = None if perf_node_data: gradient = [ node_data_builder.GradientItem(stop=0, bgColor="yellow"), @@ -722,9 +739,24 @@ def build_graph(module, perf_trace=None): graph_node_data = node_data_builder.GraphNodeData( results=perf_node_data, gradient=gradient ) - overlay_data = node_data_builder.ModelNodeData( + perf_data = node_data_builder.ModelNodeData( + graphsData={"tt-graph": graph_node_data} + ) + + accuracy_data = None + if accuracy_node_data: + thres = [ + # Show Green if ActualPCC - ExpectedPCC is 1 and below (Actual PCC >= ExpectedPCC) + node_data_builder.ThresholdItem(value=1, bgColor="green"), + # Show Red if ActualPCC - ExpectedPCC is 0 and below (ActualPCC < ExpectedPCC) + node_data_builder.ThresholdItem(value=0, bgColor="red"), + ] + graph_node_data = node_data_builder.GraphNodeData( + results=accuracy_node_data, thresholds=thres + ) + accuracy_data = node_data_builder.ModelNodeData( graphsData={"tt-graph": graph_node_data} ) graph.groupNodeAttributes = group_node_attrs - return graph, overlay_data + return graph, perf_data, accuracy_data diff --git a/tools/explorer/tt_adapter/src/tt_adapter/runner.py b/tools/explorer/tt_adapter/src/tt_adapter/runner.py index 8086b577a8..900d124a6d 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/runner.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/runner.py @@ -13,6 +13,7 @@ import pandas as pd import threading import queue +import json class ExplorerRunException(Exception): @@ -140,6 +141,17 @@ def get_perf_trace(self, model_path): return pd.read_csv(op_perf_file) + def get_golden_results(self, model_path): + accuracy_res = f"{self.model_state[model_path].model_output_dir}/run/program_0/golden_results.json" + + if not os.path.exists(accuracy_res): + raise FileNotFoundError(f"Golden results not found @ {accuracy_res}") + + with open(accuracy_res, "r") as f: + res = json.load(f) + + return res + def run_in_subprocess(self, command): self.log(f"Running command:\n{' '.join(command)}\n") From 83ad2daae81aff2b75f44ccb7f848a5f39d327ff Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 14 Feb 2025 17:45:27 +0000 Subject: [PATCH 32/40] Clean --- python/TTNNModule.cpp | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/python/TTNNModule.cpp b/python/TTNNModule.cpp index d8d5ae66e7..3610c1d211 100644 --- a/python/TTNNModule.cpp +++ b/python/TTNNModule.cpp @@ -56,13 +56,7 @@ void populateTTNNModule(py::module &m) { }) .def_property_readonly("value", [](tt::ttnn::TensorMemoryLayoutAttr self) { - llvm::outs() << "Before\n"; - auto layout = self.getValue(); - llvm::outs() << "Got the Layout\n"; - auto casted = static_cast(layout); - llvm::outs() << "Cast the Layout\n"; - - return casted; + return static_cast(self.getValue()); }); tt_attribute_class(m, "BufferTypeAttr") .def_static( From afef3786a9adba2c1aa87adef7d6a481a677bcbd Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 21 Feb 2025 23:28:01 +0000 Subject: [PATCH 33/40] Style Changes + Added Accuracy Test --- .github/workflows/build-and-test.yml | 2 +- tools/explorer/test/run_tests.py | 42 +++++++++++++++++-- .../tt_adapter/src/tt_adapter/main.py | 10 +---- .../tt_adapter/src/tt_adapter/mlir.py | 22 ++++------ 4 files changed, 49 insertions(+), 27 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index d6434d85e3..12992cb8ab 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -641,7 +641,7 @@ jobs: shell: bash run: | source env/activate - export TT_EXPLORER_GENERATED_MLIR_TEST_DIRS=${{ steps.strings.outputs.build-output-dir }}/test/ttmlir/Silicon/TTNN,${{ steps.strings.outputs.build-output-dir }}/test/python/golden/ttnn + export TT_EXPLORER_GENERATED_MLIR_TEST_DIRS=${{ steps.strings.outputs.build-output-dir }}/test/ttmlir/Silicon/TTNN/n150/perf,${{ steps.strings.outputs.build-output-dir }}/test/python/golden/ttnn export TT_EXPLORER_GENERATED_TTNN_TEST_DIRS=${{ steps.strings.outputs.build-output-dir }}/test/python/golden/ttnn pytest tools/explorer/test/run_tests.py # collect results diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index 5f23604a7f..f4162afb89 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -9,12 +9,14 @@ import pytest import glob import os +import logging HOST = "localhost" PORT = 8002 COMMAND_URL = "http://" + HOST + ":" + str(PORT) + "/apipost/v1/send_command" TEST_LOAD_MODEL_PATHS = [ "test/ttmlir/Explorer/**/*.mlir", + # Need to remove the Transforms directory from here "test/ttmlir/Silicon/TTNN/n150/perf/**/*.mlir", ] MNIST_SHARDING_PATH = "test/ttmlir/Silicon/TTNN/n150/optimizer/mnist_sharding.mlir" @@ -24,12 +26,29 @@ if "TT_EXPLORER_GENERATED_MLIR_TEST_DIRS" in os.environ: for path in os.environ["TT_EXPLORER_GENERATED_MLIR_TEST_DIRS"].split(","): - TEST_LOAD_MODEL_PATHS.append(path + "/**/*.mlir") + if os.path.exists(path): + TEST_LOAD_MODEL_PATHS.append(path + "/**/*.mlir") + else: + logging.error( + "Path %s provided in TT_EXPLORER_GENERED_MLIR_TEST_DIRS doesn't exist. Tests not added.", + path, + ) if "TT_EXPLORER_GENERATED_TTNN_TEST_DIRS" in os.environ: for path in os.environ["TT_EXPLORER_GENERATED_TTNN_TEST_DIRS"].split(","): - TEST_LOAD_MODEL_PATHS.append(path + "/**/*.ttnn") - TEST_EXECUTE_MODEL_PATHS.append(path + "/**/*.ttnn") + if os.path.exists(path): + TEST_LOAD_MODEL_PATHS.append(path + "/**/*.ttnn") + TEST_EXECUTE_MODEL_PATHS.append(path + "/**/*.ttnn") + else: + logging.error( + "Path %s provided in TT_EXPLORER_GENERED_TTNN_TEST_DIRS doesn't exist. Tests not added.", + path, + ) + +GET_TTNN_TEST = lambda: ( + [None] + + [test for test in TEST_EXECUTE_MODEL_PATHS if test.endswith("test_mnist.ttnn")] +)[-1] def get_test_files(paths): @@ -188,7 +207,7 @@ def test_execute_and_check_perf_data_exists(): timeout=300, ) result = convert_command_and_assert(MNIST_SHARDING_PATH) - assert "perf_data" in result["graphs"][0] + assert "perf_data" in result["graphs"][0]["overlays"] def test_execute_model_invalid_policy(): @@ -198,3 +217,18 @@ def test_execute_model_invalid_policy(): {"optimizationPolicy": "Invalid Policy"}, timeout=300, ) + + +def test_execute_and_check_accuracy_data_exists(): + # Get the test_mnist path + test_mnist_path = GET_TTNN_TEST() + + # Key Decision: Make Test Fail or just provide error message and skip? + assert ( + test_mnist_path is not None + ), "Couldn't find test_mnist.ttnn in GENERATED_TTNN_TEST_DIRS" + execute_command_and_wait( + test_mnist_path, {"optimizationPolicy": "Optimizer Disabled"}, timeout=300 + ) + result = convert_command_and_assert(test_mnist_path) + assert "accuracy_data" in result["graphs"][0]["overlays"] diff --git a/tools/explorer/tt_adapter/src/tt_adapter/main.py b/tools/explorer/tt_adapter/src/tt_adapter/main.py index 3d3b92e999..80fe90caa0 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/main.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/main.py @@ -101,15 +101,7 @@ def convert( module = utils.parse_mlir_str(model_file.read()) # Convert TTIR to Model Explorer Graphs and Display/Return - graph, perf_data, accuracy_data = mlir.build_graph( - module, perf_trace, golden_results - ) - overlays = {} - if perf_data: - overlays["perf_data"] = perf_data.graphsData - - if accuracy_data: - overlays["accuracy_data"] = accuracy_data.graphsData + graph, overlays = mlir.build_graph(module, perf_trace, golden_results) if overlays: graph = utils.add_to_dataclass(graph, "overlays", overlays) diff --git a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py index f37090b938..39835b4863 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py @@ -306,15 +306,12 @@ def parse_memory_config(attr): ) ) - my_layout = memory_config.tensor_memory_layout - my_val = int(my_layout.value) - my_mlir_layout = ttnn.TensorMemoryLayout(my_val) - my_mlir_layout_repr = str(my_mlir_layout) - result.append( graph_builder.KeyValue( key="tensor-memory-layout", - value=my_mlir_layout_repr, + value=str( + ttnn.TensorMemoryLayout(int(memory_config.tensor_memory_layout.value)) + ), ) ) @@ -729,8 +726,8 @@ def build_graph(module, perf_trace=None, golden_results=None): ) output_connections[source_node.id] += 1 + overlays = {} # Add performance data to the graph color overlay, if it exists - perf_data = None if perf_node_data: gradient = [ node_data_builder.GradientItem(stop=0, bgColor="yellow"), @@ -739,11 +736,10 @@ def build_graph(module, perf_trace=None, golden_results=None): graph_node_data = node_data_builder.GraphNodeData( results=perf_node_data, gradient=gradient ) - perf_data = node_data_builder.ModelNodeData( + overlays["perf_data"] = node_data_builder.ModelNodeData( graphsData={"tt-graph": graph_node_data} - ) + ).graphsData - accuracy_data = None if accuracy_node_data: thres = [ # Show Green if ActualPCC - ExpectedPCC is 1 and below (Actual PCC >= ExpectedPCC) @@ -754,9 +750,9 @@ def build_graph(module, perf_trace=None, golden_results=None): graph_node_data = node_data_builder.GraphNodeData( results=accuracy_node_data, thresholds=thres ) - accuracy_data = node_data_builder.ModelNodeData( + overlays["accuracy_data"] = node_data_builder.ModelNodeData( graphsData={"tt-graph": graph_node_data} - ) + ).graphsData graph.groupNodeAttributes = group_node_attrs - return graph, perf_data, accuracy_data + return graph, overlays From 1b883fb25fdaea26cd309ab3c83ce1c7dd2eff92 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 24 Feb 2025 18:41:27 +0000 Subject: [PATCH 34/40] Working Changes to Tests --- tools/explorer/test/run_tests.py | 13 +++++++------ tools/explorer/tt_adapter/src/tt_adapter/main.py | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index f4162afb89..db807e8034 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -16,7 +16,6 @@ COMMAND_URL = "http://" + HOST + ":" + str(PORT) + "/apipost/v1/send_command" TEST_LOAD_MODEL_PATHS = [ "test/ttmlir/Explorer/**/*.mlir", - # Need to remove the Transforms directory from here "test/ttmlir/Silicon/TTNN/n150/perf/**/*.mlir", ] MNIST_SHARDING_PATH = "test/ttmlir/Silicon/TTNN/n150/optimizer/mnist_sharding.mlir" @@ -45,11 +44,6 @@ path, ) -GET_TTNN_TEST = lambda: ( - [None] - + [test for test in TEST_EXECUTE_MODEL_PATHS if test.endswith("test_mnist.ttnn")] -)[-1] - def get_test_files(paths): files = [] @@ -58,6 +52,13 @@ def get_test_files(paths): return files +def GET_TTNN_TEST(): + for test in get_test_files(TEST_EXECUTE_MODEL_PATHS): + if test.endswith("test_mnist.ttnn"): + return test + return None + + @pytest.fixture(scope="function", autouse=True) def start_server(request): """Start the model explorer server before running tests and stop it after.""" diff --git a/tools/explorer/tt_adapter/src/tt_adapter/main.py b/tools/explorer/tt_adapter/src/tt_adapter/main.py index 80fe90caa0..50128ff5be 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/main.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/main.py @@ -124,7 +124,7 @@ def convert( module = utils.parse_mlir_str(model_file.read()) # Convert TTIR to Model Explorer Graphs and Display/Return - graph, _, _ = mlir.build_graph(module) + graph, _ = mlir.build_graph(module) return {"graphs": [graph]} From d34cc60b862a1a9a18bc301a75e4055fc0606ad6 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 24 Feb 2025 21:03:42 +0000 Subject: [PATCH 35/40] Finally got this working, please don't break in CI --- runtime/tools/python/ttrt/common/callback.py | 2 +- runtime/tools/python/ttrt/common/perf.py | 9 +++++-- runtime/tools/python/ttrt/common/run.py | 8 ++++-- runtime/tools/python/ttrt/common/util.py | 26 +++++++++++++++++-- .../tt_adapter/src/tt_adapter/mlir.py | 4 +-- .../tt_adapter/src/tt_adapter/runner.py | 15 ++++++++--- 6 files changed, 52 insertions(+), 12 deletions(-) diff --git a/runtime/tools/python/ttrt/common/callback.py b/runtime/tools/python/ttrt/common/callback.py index 93a9af267b..627ad17823 100644 --- a/runtime/tools/python/ttrt/common/callback.py +++ b/runtime/tools/python/ttrt/common/callback.py @@ -65,7 +65,7 @@ def save_memory_report(self, memory_report_path): def check_pcc(self): for loc, golden_data in self.golden_report.items(): if golden_data["actual_pcc"] < golden_data["expected_pcc"]: - raise Exception( + raise PCCErrorException( f"Failed: golden comparison failed, actual_pcc={golden_data['actual_pcc']} < expected_pcc={golden_data['expected_pcc']}" ) diff --git a/runtime/tools/python/ttrt/common/perf.py b/runtime/tools/python/ttrt/common/perf.py index 18791a777b..fd347c2f12 100644 --- a/runtime/tools/python/ttrt/common/perf.py +++ b/runtime/tools/python/ttrt/common/perf.py @@ -528,12 +528,17 @@ def signal_handler(sig, frame): for result in test_result: if result["result"] != "pass": + if result["result"] == "test_error": + raise TTRTTestException(str(result["exception"])) raise Exception(f'{result["exception"]}') except Exception as e: + result = "error" + if isinstance(e, TTRTTestException): + result = "test_error" test_result = { "file_path": bin.file_path, - "result": "error", + "result": result, "exception": str(e), "log_file": self.logger.file_name, "artifacts": self.artifacts.artifacts_folder_path, @@ -543,7 +548,7 @@ def signal_handler(sig, frame): f"ERROR: test={bin.file_path} experienced an error with exception={str(e)}" ) self.results.add_result(test_result) - bin.test_result = "error" + bin.test_result = result traceback.print_exc() continue diff --git a/runtime/tools/python/ttrt/common/run.py b/runtime/tools/python/ttrt/common/run.py index 84a06a9c4a..e830bdadfc 100644 --- a/runtime/tools/python/ttrt/common/run.py +++ b/runtime/tools/python/ttrt/common/run.py @@ -691,9 +691,13 @@ def convert_input_layouts(device, inputs, fbb, program_index): callback_runtime_config.check_memory_leak() except Exception as e: + result = "error" + if isinstance(e, TTRTTestException): + result = "test_error" + test_result = { "file_path": bin.file_path, - "result": "error", + "result": result, "exception": str(e), "log_file": self.logger.file_name, "artifacts": self.artifacts.artifacts_folder_path, @@ -703,7 +707,7 @@ def convert_input_layouts(device, inputs, fbb, program_index): f"ERROR: test={bin.file_path} experienced an error with exception={str(e)}" ) self.results.add_result(test_result) - bin.test_result = "error" + bin.test_result = result continue finally: ttrt.runtime.close_device(device) diff --git a/runtime/tools/python/ttrt/common/util.py b/runtime/tools/python/ttrt/common/util.py index 77c558a760..00e4499664 100644 --- a/runtime/tools/python/ttrt/common/util.py +++ b/runtime/tools/python/ttrt/common/util.py @@ -698,6 +698,22 @@ def __init__(self, logger, file_manager, file_path): self.test_result = "pass" +class TTRTTestException(Exception): + """ "Base class for all "Test Specific" Errors in TTRT""" + + pass + + +class PCCErrorException(TTRTTestException): + """Class to store PCC Comparison Errors""" + + pass + + +# Define a constant TTRT_TEST_ERROR_RETURN_CODE +TTRT_TEST_EXCEPTION_RETURN_CODE = 42 + + class Results: def __init__(self, logger, file_manager): self.logger = logger @@ -750,11 +766,17 @@ def save_results(self, file_name="results.json"): tree.write(xml_file_path, encoding="utf-8", xml_declaration=True) def get_result_code(self): + return_code = 0 for entry in self.results: + res = entry.get("result") if entry.get("result") != "pass": - return 1 + if res == "test_error": + return_code = TTRT_TEST_EXCEPTION_RETURN_CODE + else: + # Prioritize severity of return_code 1 if any non-test errors are encountered + return 1 - return 0 + return return_code def get_results(self): return self.results diff --git a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py index c8fc36975e..0958393010 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py @@ -754,10 +754,10 @@ def build_graph(module, perf_trace=None, golden_results=None): if accuracy_node_data: thres = [ - # Show Green if ActualPCC - ExpectedPCC is 1 and below (Actual PCC >= ExpectedPCC) - node_data_builder.ThresholdItem(value=1, bgColor="green"), # Show Red if ActualPCC - ExpectedPCC is 0 and below (ActualPCC < ExpectedPCC) node_data_builder.ThresholdItem(value=0, bgColor="red"), + # Show Green if ActualPCC - ExpectedPCC is 1 and below (Actual PCC >= ExpectedPCC) + node_data_builder.ThresholdItem(value=1, bgColor="green"), ] graph_node_data = node_data_builder.GraphNodeData( results=accuracy_node_data, thresholds=thres diff --git a/tools/explorer/tt_adapter/src/tt_adapter/runner.py b/tools/explorer/tt_adapter/src/tt_adapter/runner.py index 6a25fe6077..9a777bf181 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/runner.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/runner.py @@ -316,9 +316,18 @@ def compile_and_run(self, model_path, overrides_string): ttrt_process = self.run_in_subprocess(ttrt_perf_command) if ttrt_process.returncode != 0: - error = "Error while running TTRT perf" - self.log(error, severity=logging.error) - raise ExplorerRunException(error) + # Check if test error or TTRT Error' + print("Error Encountered, the error Code is", ttrt_process.returncode) + # 42 is the specific code for a test error instead of ttrt + if ttrt_process.returncode == 42: + error = ( + "Error while running TTRT Tests... Continuing Explorer Execution" + ) + self.log(error, severity=logging.error) + else: + error = "Error while running TTRT perf" + self.log(error, severity=logging.error) + raise ExplorerRunException(error) perf = self.get_perf_trace(model_path) columns = [ From ea13fa69aa0133888e056480c1ac9f3fd4f27714 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 24 Feb 2025 22:55:17 +0000 Subject: [PATCH 36/40] It didn't work. --- test/python/golden/test_ttir_ops.py | 13 +++++++++++-- tools/explorer/test/run_tests.py | 6 ++++++ tools/explorer/tt_adapter/src/tt_adapter/runner.py | 2 -- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/test/python/golden/test_ttir_ops.py b/test/python/golden/test_ttir_ops.py index ad2ce48987..37115d922c 100644 --- a/test/python/golden/test_ttir_ops.py +++ b/test/python/golden/test_ttir_ops.py @@ -53,9 +53,11 @@ def test_logical_not(in0: Operand, builder: TTIRBuilder): # NOTE: The generated flatbuffer will currently fail to run due to only floats # being supported by the runtime. See issue #1775 for tracking +""" @compile_to_flatbuffer([(128, 128)], inputs_types=[torch.int8], targets=["ttnn"]) def test_bitwise_not(in0: Operand, builder: TTIRBuilder): return builder.bitwise_not(in0) +""" @compile_to_flatbuffer([(128, 128)], targets=["ttnn"]) @@ -217,6 +219,8 @@ def test_logical_xor(in0: Operand, in1: Operand, builder: TTIRBuilder): # NOTE: The generated flatbuffer will currently fail to run due to only floats # being supported by the runtime. See issue #1775 for tracking + +""" @compile_to_flatbuffer( [ (64, 64), @@ -227,10 +231,12 @@ def test_logical_xor(in0: Operand, in1: Operand, builder: TTIRBuilder): ) def test_bitwise_and(in0: Operand, in1: Operand, builder: TTIRBuilder): return builder.bitwise_and(in0, in1) - +""" # NOTE: The generated flatbuffer will currently fail to run due to only floats # being supported by the runtime. See issue #1775 for tracking + +""" @compile_to_flatbuffer( [ (64, 64), @@ -241,10 +247,12 @@ def test_bitwise_and(in0: Operand, in1: Operand, builder: TTIRBuilder): ) def test_bitwise_or(in0: Operand, in1: Operand, builder: TTIRBuilder): return builder.bitwise_or(in0, in1) - +""" # NOTE: The generated flatbuffer will currently fail to run due to only floats # being supported by the runtime. See issue #1775 for tracking + +""" @compile_to_flatbuffer( [ (64, 64), @@ -255,6 +263,7 @@ def test_bitwise_or(in0: Operand, in1: Operand, builder: TTIRBuilder): ) def test_bitwise_xor(in0: Operand, in1: Operand, builder: TTIRBuilder): return builder.bitwise_xor(in0, in1) +""" @compile_to_flatbuffer( diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index db807e8034..72120e77ab 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -23,6 +23,8 @@ MNIST_SHARDING_PATH, ] +INVALID_TESTS = ["test_unsqueeze.ttnn", "test_squeeze.ttnn"] + if "TT_EXPLORER_GENERATED_MLIR_TEST_DIRS" in os.environ: for path in os.environ["TT_EXPLORER_GENERATED_MLIR_TEST_DIRS"].split(","): if os.path.exists(path): @@ -49,6 +51,10 @@ def get_test_files(paths): files = [] for path in paths: files.extend(glob.glob(path, recursive=True)) + + # filter for invalid tests + files = [file for file in files if all(not file.endswith(x) for x in INVALID_TESTS)] + return files diff --git a/tools/explorer/tt_adapter/src/tt_adapter/runner.py b/tools/explorer/tt_adapter/src/tt_adapter/runner.py index 9a777bf181..767e1b4b79 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/runner.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/runner.py @@ -316,8 +316,6 @@ def compile_and_run(self, model_path, overrides_string): ttrt_process = self.run_in_subprocess(ttrt_perf_command) if ttrt_process.returncode != 0: - # Check if test error or TTRT Error' - print("Error Encountered, the error Code is", ttrt_process.returncode) # 42 is the specific code for a test error instead of ttrt if ttrt_process.returncode == 42: error = ( From b84f042c9712713a5f1c7a4fe777a0eb4e4bcea3 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Tue, 25 Feb 2025 16:42:44 +0000 Subject: [PATCH 37/40] Removed Squeeze/Unsqueeze tests while bug is figured out --- test/python/golden/test_ttir_ops.py | 8 ++++++-- tools/explorer/test/run_tests.py | 5 ----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/test/python/golden/test_ttir_ops.py b/test/python/golden/test_ttir_ops.py index 37115d922c..e4d750e6c8 100644 --- a/test/python/golden/test_ttir_ops.py +++ b/test/python/golden/test_ttir_ops.py @@ -10,15 +10,19 @@ from ttmlir.test_utils import compile_to_flatbuffer, set_output_path from ttmlir.ttir_builder import Operand, TTIRBuilder, Attribute - +# NOTE: This test is not valid for TTRT Perf due to weird issues with perf collection +""" @compile_to_flatbuffer([(1, 128, 128, 1)], targets=["ttnn"]) def test_squeeze(in0: Operand, builder: TTIRBuilder): return builder.squeeze(in0, 0) +""" - +# NOTE: Same as Squeeze, this Op is not valid for TTRT Perf. +""" @compile_to_flatbuffer([(128, 128)], targets=["ttnn"]) def test_unsqueeze(in0: Operand, builder: TTIRBuilder): return builder.unsqueeze(in0, 0) +""" @compile_to_flatbuffer([(128, 128)], targets=["ttnn"]) diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index 72120e77ab..d577c784eb 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -23,8 +23,6 @@ MNIST_SHARDING_PATH, ] -INVALID_TESTS = ["test_unsqueeze.ttnn", "test_squeeze.ttnn"] - if "TT_EXPLORER_GENERATED_MLIR_TEST_DIRS" in os.environ: for path in os.environ["TT_EXPLORER_GENERATED_MLIR_TEST_DIRS"].split(","): if os.path.exists(path): @@ -52,9 +50,6 @@ def get_test_files(paths): for path in paths: files.extend(glob.glob(path, recursive=True)) - # filter for invalid tests - files = [file for file in files if all(not file.endswith(x) for x in INVALID_TESTS)] - return files From 967b166cf187570ac718a7ed4e43c29989872fcf Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 26 Feb 2025 15:44:23 +0000 Subject: [PATCH 38/40] I give up, CI debug printing --- tools/explorer/test/run_tests.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index d577c784eb..cf1887119a 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -233,4 +233,7 @@ def test_execute_and_check_accuracy_data_exists(): test_mnist_path, {"optimizationPolicy": "Optimizer Disabled"}, timeout=300 ) result = convert_command_and_assert(test_mnist_path) + if "accuracy_data" not in result["graphs"][0]["overlays"]: + overlays = result["graphs"][0]["overlays"] + print(overlays) assert "accuracy_data" in result["graphs"][0]["overlays"] From 3009b57b6ed3bbec36fa882f6136d1141e922476 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 26 Feb 2025 19:42:37 +0000 Subject: [PATCH 39/40] Made test more verbose --- .github/workflows/build-and-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 63691ddbb3..d118c7990b 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -732,7 +732,7 @@ jobs: source env/activate export TT_EXPLORER_GENERATED_MLIR_TEST_DIRS=${{ steps.strings.outputs.build-output-dir }}/test/ttmlir/Silicon/TTNN/n150/perf,${{ steps.strings.outputs.build-output-dir }}/test/python/golden/ttnn export TT_EXPLORER_GENERATED_TTNN_TEST_DIRS=${{ steps.strings.outputs.build-output-dir }}/test/python/golden/ttnn - pytest tools/explorer/test/run_tests.py + pytest -svv tools/explorer/test/run_tests.py # collect results From 3a3c9dfda8fa283612fde4501b5544cfac609d98 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 26 Feb 2025 21:30:29 +0000 Subject: [PATCH 40/40] small changes + removed llama test --- test/python/golden/test_ttir_models.py | 17 ++++++++++++++++- test/python/golden/test_ttir_ops.py | 2 +- tools/explorer/test/run_tests.py | 14 +++++++++++--- 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/test/python/golden/test_ttir_models.py b/test/python/golden/test_ttir_models.py index 33f49b76f4..a516d39d1b 100644 --- a/test/python/golden/test_ttir_models.py +++ b/test/python/golden/test_ttir_models.py @@ -6,7 +6,7 @@ import inspect -from ttmlir.test_utils import compile_to_flatbuffer +from ttmlir.test_utils import compile_to_flatbuffer, set_output_path from ttmlir.ttir_builder import Operand, TTIRBuilder @@ -139,6 +139,21 @@ def test_llama_attention( if __name__ == "__main__": + import argparse, os + + parser = argparse.ArgumentParser(description="Run TTIR Builder Model tests") + parser.add_argument( + "--path", + type=str, + help="Optional output path for the flatbuffer. Creates path if supplied path doesn't exist", + ) + args = parser.parse_args() + + if args.path and os.path.exists(args.path): + if not os.path.exists(args.path): + os.makedirs(args.path) + set_output_path(args.path) + test_functions = inspect.getmembers( inspect.getmodule(inspect.currentframe()), inspect.isfunction ) diff --git a/test/python/golden/test_ttir_ops.py b/test/python/golden/test_ttir_ops.py index 967ab8bf60..8e7bffb5b1 100644 --- a/test/python/golden/test_ttir_ops.py +++ b/test/python/golden/test_ttir_ops.py @@ -465,7 +465,7 @@ def test_arbitrary_op_chain( if __name__ == "__main__": import argparse, os - parser = argparse.ArgumentParser(description="Run TTIR tests") + parser = argparse.ArgumentParser(description="Run TTIR Builder Op tests") parser.add_argument( "--path", type=str, diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index cf1887119a..d834444eef 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -44,12 +44,21 @@ path, ) +FILTERED_TESTS = [ + # This test is way too large to fit reasonably in CI. + "test_llama_attention.ttnn", +] + def get_test_files(paths): files = [] for path in paths: files.extend(glob.glob(path, recursive=True)) + files = [ + file for file in files if all(not file.endswith(x) for x in FILTERED_TESTS) + ] + return files @@ -234,6 +243,5 @@ def test_execute_and_check_accuracy_data_exists(): ) result = convert_command_and_assert(test_mnist_path) if "accuracy_data" not in result["graphs"][0]["overlays"]: - overlays = result["graphs"][0]["overlays"] - print(overlays) - assert "accuracy_data" in result["graphs"][0]["overlays"] + print(result) + assert "accuracy_data" in str(result)