From 317aa3c27fdd557757bd2c222ad0eb7819ed3ad1 Mon Sep 17 00:00:00 2001 From: moneta Date: Wed, 29 Jan 2025 11:24:39 +0100 Subject: [PATCH] [tmva][sofie] Update in README list of new implemented operators Fix also some compilation warnings and remove some commented code --- tmva/sofie/README.md | 9 +++++++++ tmva/sofie/inc/TMVA/ROperator_BasicBinary.hxx | 9 +-------- tmva/sofie/inc/TMVA/ROperator_Einsum.hxx | 5 +++-- tmva/sofie/inc/TMVA/ROperator_Gemm.hxx | 3 --- tmva/sofie/inc/TMVA/ROperator_Split.hxx | 7 +------ tmva/sofie/inc/TMVA/SOFIE_common.hxx | 2 +- 6 files changed, 15 insertions(+), 20 deletions(-) diff --git a/tmva/sofie/README.md b/tmva/sofie/README.md index a2143d424bf89..04ade32e00c72 100644 --- a/tmva/sofie/README.md +++ b/tmva/sofie/README.md @@ -82,6 +82,8 @@ Here is the updated list of supported ONNX operators - [x] ConstantOfShape - [x] Conv - [x] ConvTranspose +- [x] Cos +- [x] Einsum - [x] Elu - [x] Equal - [x] Erf @@ -110,9 +112,14 @@ Here is the updated list of supported ONNX operators - [x] Min - [x] Mul - [x] Neg +- [x] Pad - [x] Pool - [x] Pow - [x] Range +- [x] RandomNormal +- [x] RandomNormalLike +- [x] RandomUniform +- [x] RandomUniformLike - [x] Reciprocal - [x] ReduceMean - [x] ReduceProd @@ -123,6 +130,7 @@ Here is the updated list of supported ONNX operators - [x] RNN - [x] Selu - [x] Sigmoid +- [x] Sin - [x] Slice - [x] Softmax - [x] Split @@ -133,6 +141,7 @@ Here is the updated list of supported ONNX operators - [x] TopK - [x] Transpose - [x] Unsqueeze +- [x] Where The above operators are supported for tensors of the following types: diff --git a/tmva/sofie/inc/TMVA/ROperator_BasicBinary.hxx b/tmva/sofie/inc/TMVA/ROperator_BasicBinary.hxx index e01f2ba8de44b..975c3a48c3521 100644 --- a/tmva/sofie/inc/TMVA/ROperator_BasicBinary.hxx +++ b/tmva/sofie/inc/TMVA/ROperator_BasicBinary.hxx @@ -180,24 +180,17 @@ public: size_t length = ConvertShapeToLength(fShapeY); std::string typeName = TensorType::Name(); // Broadcast A if it's 
uninitialized + // use broadcasting function where we pass an already allocated tensor to minimize memory allocations if (fShapeA != fShapeY) { out << SP << "// Broadcasting uninitialized tensor " << fNA << "\n"; - //out << SP << "{\n"; out << SP << "TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<" << typeName << ">(tensor_" << fNA << ", " << ConvertShapeToString(fShapeA) << ", " << ConvertShapeToString(fShapeY) << ", fTensor_" << fNBroadcastedA << ");\n"; - //out << SP << SP << "std::copy(data, data + " << length << ", tensor_" << fNBroadcastedA << ");\n"; - //out << SP << SP << "delete[] data;\n"; - //out << SP << "}\n"; } // Broadcast B if it's uninitialized if (fShapeB != fShapeY) { out << SP << "// Broadcasting uninitialized tensor " << fNB << "\n"; - //out << SP << "{\n"; out << SP << "TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<" << typeName << ">(tensor_" << fNB << ", " << ConvertShapeToString(fShapeB) << ", " << ConvertShapeToString(fShapeY) << ", fTensor_" << fNBroadcastedB << ");\n"; - //out << SP << SP << "std::copy(data, data + " << length << ", tensor_" << fNBroadcastedB << ");\n"; - //out << SP << SP << "delete[] data;\n"; - //out << SP << "}\n"; } const std::string& nameA = fNBroadcastedA.empty()? fNA : fNBroadcastedA; const std::string& nameB = fNBroadcastedB.empty()? 
fNB : fNBroadcastedB; diff --git a/tmva/sofie/inc/TMVA/ROperator_Einsum.hxx b/tmva/sofie/inc/TMVA/ROperator_Einsum.hxx index 14f77f19bc6b4..0f31214335f44 100644 --- a/tmva/sofie/inc/TMVA/ROperator_Einsum.hxx +++ b/tmva/sofie/inc/TMVA/ROperator_Einsum.hxx @@ -6,6 +6,7 @@ #include "TMVA/RModel.hxx" #include +#include namespace TMVA{ namespace Experimental{ @@ -230,8 +231,8 @@ public: if (fGemmType.empty()) { int outDims = fShapeY.size(); int inDims = fSumLabels.length(); - assert(outDims == fOutputLabels.size()); - assert(inDims == fSumDims.size()); + assert(outDims == int(fOutputLabels.size())); + assert(inDims == int(fSumDims.size())); for (int i = 0; i < outDims; i++) { for (int j = 0; j < i; j++) out << SP; std::string l {fOutputLabels[i]}; diff --git a/tmva/sofie/inc/TMVA/ROperator_Gemm.hxx b/tmva/sofie/inc/TMVA/ROperator_Gemm.hxx index 19d4b83d8ab0b..5873580717ea2 100644 --- a/tmva/sofie/inc/TMVA/ROperator_Gemm.hxx +++ b/tmva/sofie/inc/TMVA/ROperator_Gemm.hxx @@ -182,9 +182,6 @@ namespace SOFIE{ } } - // throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensors have not compatible shapes. 
A " + - // ConvertDynamicShapeToString(fShapeA) + " B " + ConvertDynamicShapeToString(fShapeB) ); - fShapeY = DynamicShapeInference({fShapeA, fShapeB})[0]; std::vector shapeY; if (!fIsDynamic) { diff --git a/tmva/sofie/inc/TMVA/ROperator_Split.hxx b/tmva/sofie/inc/TMVA/ROperator_Split.hxx index e6d41a9bdcd4b..48d0242517b7c 100644 --- a/tmva/sofie/inc/TMVA/ROperator_Split.hxx +++ b/tmva/sofie/inc/TMVA/ROperator_Split.hxx @@ -108,12 +108,7 @@ public: throw std::runtime_error("TMVA SOFIE Operator Split called to Generate without being initialized first"); } - // compute input and output strides auto input_strides = UTILITY::ComputeStrideFromShape(fInputShape); - std::vector> output_strides; - for (size_t i = 0; i < fOutputShapes.size(); i++) { - output_strides.emplace_back( UTILITY::ComputeStrideFromShape(fOutputShapes[i])); - } // generate now the code for split std::stringstream out; @@ -121,7 +116,7 @@ public: out << SP << "size_t " << OpName << "_axis_offset = 0;\n"; // unroll the loop on split outputs for (size_t i = 0; i < fNYs.size(); i++) { - int length = ConvertShapeToLength(fOutputShapes[i]); + size_t length = ConvertShapeToLength(fOutputShapes[i]); auto output_strides = UTILITY::ComputeStrideFromShape(fOutputShapes[i]); out << SP << "for (int id = 0; id < " << length << " ; id++){\n"; diff --git a/tmva/sofie/inc/TMVA/SOFIE_common.hxx b/tmva/sofie/inc/TMVA/SOFIE_common.hxx index 8d96449f49d95..bfab06344c730 100644 --- a/tmva/sofie/inc/TMVA/SOFIE_common.hxx +++ b/tmva/sofie/inc/TMVA/SOFIE_common.hxx @@ -157,7 +157,7 @@ public: bool IsConstantTensor() const { return fConstant;} // query if tensor needs to be written in a weight file. Constant tensors are not written in a file bool IsWeightTensor() const { return !fConstant && !fIsNotWritable;} - + // set not writable initialized tensors - i.e. tensor that must not be written in a file void SetNotWritable() { fIsNotWritable = true;} template